Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 0b13832c

History | View | Annotate | Download (250.6 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import time
29 a8083063 Iustin Pop
import tempfile
30 a8083063 Iustin Pop
import re
31 a8083063 Iustin Pop
import platform
32 ffa1c0dc Iustin Pop
import logging
33 74409b12 Iustin Pop
import copy
34 4b7735f9 Iustin Pop
import random
35 a8083063 Iustin Pop
36 a8083063 Iustin Pop
from ganeti import ssh
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 6048c986 Guido Trotter
from ganeti import locking
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 8d14b30d Iustin Pop
from ganeti import serializer
45 112f18a5 Iustin Pop
from ganeti import ssconf
46 d61df03e Iustin Pop
47 d61df03e Iustin Pop
48 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  # Names of opcode attributes that must be present (and not None);
  # checked in __init__ before CheckArguments runs.
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    @param processor: the processor running this LU; only its
        LogWarning/LogInfo callbacks are used directly here
    @param op: the opcode instance this LU executes
    @param context: execution context; must provide the cluster
        configuration as its C{cfg} attribute
    @param rpc: the RPC runner used to talk to nodes
    @raise errors.OpPrereqError: if any parameter listed in _OP_REQP is
        missing (or None) on the opcode

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # Per-level shared-lock flag: 0 = exclusive (default), a true value
    # (usually 1) = shared; see the ExpandNames docstring.
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # Lazily-created SshRunner, exposed via the 'ssh' property below.
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    # Validate that every required opcode parameter is present.
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    The runner is built on first access and cached for later calls.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separate is better because:

      - ExpandNames is left as as purely a lock-related function
      - CheckPrereq is run after we have aquired locks (and possible
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, ecc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    @param feedback_fn: callable used to report progress back to the caller

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    @raise errors.OpPrereqError: if the instance name cannot be expanded

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      # Guard against double-declaration of the instance-level lock.
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    If should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    # Either replace the node-level needs outright or append to them,
    # depending on what the LU requested via recalculate_locks.
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # One-shot: the recalculation request is consumed here.
    del self.recalculate_locks[locking.LEVEL_NODE]
326 c4a2fee1 Guido Trotter
327 a8083063 Iustin Pop
328 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Base class for Logical Units that run no hooks.

  Deriving from this class (instead of L{LogicalUnit} directly) spares
  hook-less LUs from each having to disable HPATH/HTYPE themselves,
  avoiding duplicated boilerplate.

  """
  HTYPE = None
  HPATH = None
337 a8083063 Iustin Pop
338 a8083063 Iustin Pop
339 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: non-empty list of (possibly short) node names
  @rtype: list
  @return: the expanded node names, sorted via L{utils.NiceSort}
  @raise errors.OpPrereqError: if the parameter is not a list or
      contains an unknown node name
  @raise errors.ProgrammerError: if called with an empty node list

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  expanded = []
  for short_name in nodes:
    full_name = lu.cfg.ExpandNodeName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % short_name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
366 3312b702 Iustin Pop
367 3312b702 Iustin Pop
368 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
369 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded instance names.
370 3312b702 Iustin Pop

371 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
372 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
373 e4376078 Iustin Pop
  @type instances: list
374 e4376078 Iustin Pop
  @param instances: list of instance names or None for all instances
375 e4376078 Iustin Pop
  @rtype: list
376 e4376078 Iustin Pop
  @return: the list of instances, sorted
377 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if the instances parameter is wrong type
378 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if any of the passed instances is not found
379 3312b702 Iustin Pop

380 3312b702 Iustin Pop
  """
381 3312b702 Iustin Pop
  if not isinstance(instances, list):
382 3312b702 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'instances'")
383 3312b702 Iustin Pop
384 3312b702 Iustin Pop
  if instances:
385 3312b702 Iustin Pop
    wanted = []
386 3312b702 Iustin Pop
387 3312b702 Iustin Pop
    for name in instances:
388 a7ba5e53 Iustin Pop
      instance = lu.cfg.ExpandInstanceName(name)
389 3312b702 Iustin Pop
      if instance is None:
390 3312b702 Iustin Pop
        raise errors.OpPrereqError("No such instance name '%s'" % name)
391 3312b702 Iustin Pop
      wanted.append(instance)
392 3312b702 Iustin Pop
393 3312b702 Iustin Pop
  else:
394 a7f5dc98 Iustin Pop
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
395 a7f5dc98 Iustin Pop
  return wanted
396 dcb93971 Michael Hanselmann
397 dcb93971 Michael Hanselmann
398 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Raises if any entry in C{selected} matches neither the static nor the
  dynamic field set.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @raise errors.OpPrereqError: if any selected field is unknown

  """
  all_fields = utils.FieldSet()
  all_fields.Extend(static)
  all_fields.Extend(dynamic)

  unknown = all_fields.NonMatching(selected)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown))
415 dcb93971 Michael Hanselmann
416 dcb93971 Michael Hanselmann
417 a5961235 Iustin Pop
def _CheckBooleanOpField(op, name):
418 a5961235 Iustin Pop
  """Validates boolean opcode parameters.
419 a5961235 Iustin Pop

420 a5961235 Iustin Pop
  This will ensure that an opcode parameter is either a boolean value,
421 a5961235 Iustin Pop
  or None (but that it always exists).
422 a5961235 Iustin Pop

423 a5961235 Iustin Pop
  """
424 a5961235 Iustin Pop
  val = getattr(op, name, None)
425 a5961235 Iustin Pop
  if not (val is None or isinstance(val, bool)):
426 a5961235 Iustin Pop
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
427 a5961235 Iustin Pop
                               (name, str(val)))
428 a5961235 Iustin Pop
  setattr(op, name, val)
429 a5961235 Iustin Pop
430 a5961235 Iustin Pop
431 a5961235 Iustin Pop
def _CheckNodeOnline(lu, node):
432 a5961235 Iustin Pop
  """Ensure that a given node is online.
433 a5961235 Iustin Pop

434 a5961235 Iustin Pop
  @param lu: the LU on behalf of which we make the check
435 a5961235 Iustin Pop
  @param node: the node to check
436 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is offline
437 a5961235 Iustin Pop

438 a5961235 Iustin Pop
  """
439 a5961235 Iustin Pop
  if lu.cfg.GetNodeInfo(node).offline:
440 a5961235 Iustin Pop
    raise errors.OpPrereqError("Can't use offline node %s" % node)
441 a5961235 Iustin Pop
442 a5961235 Iustin Pop
443 733a2b6a Iustin Pop
def _CheckNodeNotDrained(lu, node):
444 733a2b6a Iustin Pop
  """Ensure that a given node is not drained.
445 733a2b6a Iustin Pop

446 733a2b6a Iustin Pop
  @param lu: the LU on behalf of which we make the check
447 733a2b6a Iustin Pop
  @param node: the node to check
448 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is drained
449 733a2b6a Iustin Pop

450 733a2b6a Iustin Pop
  """
451 733a2b6a Iustin Pop
  if lu.cfg.GetNodeInfo(node).drained:
452 733a2b6a Iustin Pop
    raise errors.OpPrereqError("Can't use drained node %s" % node)
453 733a2b6a Iustin Pop
454 733a2b6a Iustin Pop
455 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
456 67fc3042 Iustin Pop
                          memory, vcpus, nics, disk_template, disks,
457 67fc3042 Iustin Pop
                          bep, hvp, hypervisor):
458 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
459 e4376078 Iustin Pop

460 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
461 e4376078 Iustin Pop

462 e4376078 Iustin Pop
  @type name: string
463 e4376078 Iustin Pop
  @param name: the name of the instance
464 e4376078 Iustin Pop
  @type primary_node: string
465 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
466 e4376078 Iustin Pop
  @type secondary_nodes: list
467 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
468 e4376078 Iustin Pop
  @type os_type: string
469 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
470 0d68c45d Iustin Pop
  @type status: boolean
471 0d68c45d Iustin Pop
  @param status: the should_run status of the instance
472 e4376078 Iustin Pop
  @type memory: string
473 e4376078 Iustin Pop
  @param memory: the memory size of the instance
474 e4376078 Iustin Pop
  @type vcpus: string
475 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
476 e4376078 Iustin Pop
  @type nics: list
477 e4376078 Iustin Pop
  @param nics: list of tuples (ip, bridge, mac) representing
478 e4376078 Iustin Pop
      the NICs the instance  has
479 2c2690c9 Iustin Pop
  @type disk_template: string
480 2c2690c9 Iustin Pop
  @param disk_template: the distk template of the instance
481 2c2690c9 Iustin Pop
  @type disks: list
482 2c2690c9 Iustin Pop
  @param disks: the list of (size, mode) pairs
483 67fc3042 Iustin Pop
  @type bep: dict
484 67fc3042 Iustin Pop
  @param bep: the backend parameters for the instance
485 67fc3042 Iustin Pop
  @type hvp: dict
486 67fc3042 Iustin Pop
  @param hvp: the hypervisor parameters for the instance
487 67fc3042 Iustin Pop
  @type hypervisor: string
488 67fc3042 Iustin Pop
  @param hypervisor: the hypervisor for the instance
489 e4376078 Iustin Pop
  @rtype: dict
490 e4376078 Iustin Pop
  @return: the hook environment for this instance
491 ecb215b5 Michael Hanselmann

492 396e1b78 Michael Hanselmann
  """
493 0d68c45d Iustin Pop
  if status:
494 0d68c45d Iustin Pop
    str_status = "up"
495 0d68c45d Iustin Pop
  else:
496 0d68c45d Iustin Pop
    str_status = "down"
497 396e1b78 Michael Hanselmann
  env = {
498 0e137c28 Iustin Pop
    "OP_TARGET": name,
499 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
500 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
501 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
502 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
503 0d68c45d Iustin Pop
    "INSTANCE_STATUS": str_status,
504 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
505 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
506 2c2690c9 Iustin Pop
    "INSTANCE_DISK_TEMPLATE": disk_template,
507 67fc3042 Iustin Pop
    "INSTANCE_HYPERVISOR": hypervisor,
508 396e1b78 Michael Hanselmann
  }
509 396e1b78 Michael Hanselmann
510 396e1b78 Michael Hanselmann
  if nics:
511 396e1b78 Michael Hanselmann
    nic_count = len(nics)
512 62f0dd02 Guido Trotter
    for idx, (ip, mac, mode, link) in enumerate(nics):
513 396e1b78 Michael Hanselmann
      if ip is None:
514 396e1b78 Michael Hanselmann
        ip = ""
515 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
516 2c2690c9 Iustin Pop
      env["INSTANCE_NIC%d_MAC" % idx] = mac
517 62f0dd02 Guido Trotter
      env["INSTANCE_NIC%d_MODE" % idx] = mode
518 62f0dd02 Guido Trotter
      env["INSTANCE_NIC%d_LINK" % idx] = link
519 62f0dd02 Guido Trotter
      if mode == constants.NIC_MODE_BRIDGED:
520 62f0dd02 Guido Trotter
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
521 396e1b78 Michael Hanselmann
  else:
522 396e1b78 Michael Hanselmann
    nic_count = 0
523 396e1b78 Michael Hanselmann
524 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
525 396e1b78 Michael Hanselmann
526 2c2690c9 Iustin Pop
  if disks:
527 2c2690c9 Iustin Pop
    disk_count = len(disks)
528 2c2690c9 Iustin Pop
    for idx, (size, mode) in enumerate(disks):
529 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_SIZE" % idx] = size
530 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_MODE" % idx] = mode
531 2c2690c9 Iustin Pop
  else:
532 2c2690c9 Iustin Pop
    disk_count = 0
533 2c2690c9 Iustin Pop
534 2c2690c9 Iustin Pop
  env["INSTANCE_DISK_COUNT"] = disk_count
535 2c2690c9 Iustin Pop
536 67fc3042 Iustin Pop
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
537 67fc3042 Iustin Pop
    for key, value in source.items():
538 67fc3042 Iustin Pop
      env["INSTANCE_%s_%s" % (kind, key)] = value
539 67fc3042 Iustin Pop
540 396e1b78 Michael Hanselmann
  return env
541 396e1b78 Michael Hanselmann
542 f9b10246 Guido Trotter
def _NICListToTuple(lu, nics):
  """Convert NIC objects into hook-style (ip, mac, mode, link) tuples.

  The resulting list is suitable to be passed to _BuildInstanceHookEnv or
  used as a return value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples
  @rtype: list of tuples
  @return: one (ip, mac, mode, link) tuple per input nic

  """
  # per-nic parameters are filled from the cluster-wide defaults
  cluster_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  result = []
  for nic in nics:
    params = objects.FillDict(cluster_nicparams, nic.nicparams)
    result.append((nic.ip, nic.mac,
                   params[constants.NIC_MODE],
                   params[constants.NIC_LINK]))
  return result
564 396e1b78 Michael Hanselmann
565 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  # fill the instance's backend and hypervisor params from cluster defaults
  be_full = cluster.FillBE(instance)
  hv_full = cluster.FillHV(instance)
  hook_args = dict(
    name=instance.name,
    primary_node=instance.primary_node,
    secondary_nodes=instance.secondary_nodes,
    os_type=instance.os,
    status=instance.admin_up,
    memory=be_full[constants.BE_MEMORY],
    vcpus=be_full[constants.BE_VCPUS],
    nics=_NICListToTuple(lu, instance.nics),
    disk_template=instance.disk_template,
    disks=[(disk.size, disk.mode) for disk in instance.disks],
    bep=be_full,
    hvp=hv_full,
    hypervisor=instance.hypervisor,
    )
  if override:
    hook_args.update(override)
  return _BuildInstanceHookEnv(**hook_args)
601 396e1b78 Michael Hanselmann
602 396e1b78 Michael Hanselmann
603 ec0292f1 Iustin Pop
def _AdjustCandidatePool(lu):
604 ec0292f1 Iustin Pop
  """Adjust the candidate pool after node operations.
605 ec0292f1 Iustin Pop

606 ec0292f1 Iustin Pop
  """
607 ec0292f1 Iustin Pop
  mod_list = lu.cfg.MaintainCandidatePool()
608 ec0292f1 Iustin Pop
  if mod_list:
609 ec0292f1 Iustin Pop
    lu.LogInfo("Promoted nodes to master candidate role: %s",
610 ee513a66 Iustin Pop
               ", ".join(node.name for node in mod_list))
611 ec0292f1 Iustin Pop
    for name in mod_list:
612 ec0292f1 Iustin Pop
      lu.context.ReaddNode(name)
613 ec0292f1 Iustin Pop
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
614 ec0292f1 Iustin Pop
  if mc_now > mc_max:
615 ec0292f1 Iustin Pop
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
616 ec0292f1 Iustin Pop
               (mc_now, mc_max))
617 ec0292f1 Iustin Pop
618 ec0292f1 Iustin Pop
619 b165e77e Guido Trotter
def _CheckNicsBridgesExist(lu, target_nics, target_node,
                           profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  Only nics in bridged mode are checked; the RPC is skipped entirely
  when no bridged nics are present.

  """
  cluster_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  bridges = []
  for nic in target_nics:
    params = objects.FillDict(cluster_nicparams, nic.nicparams)
    if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      bridges.append(params[constants.NIC_LINK])
  if bridges:
    result = lu.rpc.call_bridges_exist(target_node, bridges)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True)
633 b165e77e Guido Trotter
634 b165e77e Guido Trotter
635 b165e77e Guido Trotter
def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  Defaults to the instance's primary node when no node is given.

  """
  target_node = instance.primary_node if node is None else node
  _CheckNicsBridgesExist(lu, instance.nics, target_node)
642 bf6929a2 Alexander Schreiber
643 bf6929a2 Alexander Schreiber
644 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies that the master is the only remaining node and that no
    instances are left in the configuration.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodes = self.cfg.GetNodeList()
    if not (len(nodes) == 1 and nodes[0] == master):
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodes) - 1))
    instances = self.cfg.GetInstanceList()
    if instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Stops the master role on the master node and backs up the Ganeti
    SSH key pair before the cluster configuration goes away.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for keyfile in (priv_key, pub_key):
      utils.CreateBackup(keyfile)
    return master
680 a8083063 Iustin Pop
681 a8083063 Iustin Pop
682 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
683 a8083063 Iustin Pop
  """Verifies the cluster status.
684 a8083063 Iustin Pop

685 a8083063 Iustin Pop
  """
686 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
687 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
688 e54c4c5e Guido Trotter
  _OP_REQP = ["skip_checks"]
689 d4b9d97f Guido Trotter
  REQ_BGL = False
690 d4b9d97f Guido Trotter
691 d4b9d97f Guido Trotter
  def ExpandNames(self):
    """Declare locks on all nodes and instances, acquired in shared mode."""
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # every lock level is taken shared: verification only reads state
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
697 a8083063 Iustin Pop
698 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map, vg_name):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
    @rtype: boolean
    @return: True if problems were found on this node, False otherwise

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    # a protocol mismatch makes all further results meaningless, so bail out
    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version; a mismatch is only a warning, not an error
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G (skipped when LVM is not in use)
    if vg_name is not None:
      vglist = node_result.get(constants.NV_VGLIST, None)
      if not vglist:
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                        (node,))
        bad = True
      else:
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
          bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        # files listed in master_files are only required on master candidates
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        # the NV_NODELIST result maps failing peer name -> error message
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    # hypervisor verification failures are reported but do not mark the
    # node bad (matching the historical behavior of this check)
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list (only meaningful when LVM/DRBD is in use)
    if vg_name is not None:
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
      if not isinstance(used_minors, (tuple, list)):
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
                    str(used_minors))
      else:
        # cross-check both directions: configured minors must be in use
        # (when the instance should be running), and used minors must be
        # known to the configuration
        for minor, (iname, must_exist) in drbd_map.items():
          if minor not in used_minors and must_exist:
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
                        " not active" % (minor, iname))
            bad = True
        for minor in used_minors:
          if minor not in drbd_map:
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
                        minor)
            bad = True

    return bad
845 a8083063 Iustin Pop
846 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This checks that the logical volumes required by the instance are
    present on its nodes and that the instance runs exactly on its
    primary node (and nowhere else).

    """
    problems = False

    pnode = instanceconfig.primary_node

    expected_vols = {}
    instanceconfig.MapLVsByNode(expected_vols)

    for node, volumes in expected_vols.items():
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      present = node_vol_is.get(node, [])
      for volume in volumes:
        if volume not in present:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                      (volume, node))
          problems = True

    if instanceconfig.admin_up:
      running_on_pnode = (pnode in node_instance and
                          instance in node_instance[pnode])
      if not running_on_pnode and pnode not in n_offline:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                    (instance, pnode))
        problems = True

    for node, running in node_instance.items():
      if node != pnode and instance in running:
        feedback_fn("  - ERROR: instance %s should not run on node %s" %
                    (instance, node))
        problems = True

    return problems
887 a8083063 Iustin Pop
888 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    orphans_found = False

    for node, volumes in node_vol_is.items():
      expected = node_vol_should.get(node, [])
      for volume in volumes:
        if volume not in expected:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          orphans_found = True
    return orphans_found
904 a8083063 Iustin Pop
905 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    Reports any instance that is running on a node but unknown to the
    cluster configuration.

    """
    orphans_found = False
    known = frozenset(instancelist)
    for node, running in node_instance.items():
      for iname in running:
        if iname not in known:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                      (iname, node))
          orphans_found = True
    return orphans_found
919 a8083063 Iustin Pop
920 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    @type node_info: dict
    @param node_info: node name -> dict with (at least) the 'mfree' and
        'sinst-by-pnode' keys
    @type instance_cfg: dict
    @param instance_cfg: instance name -> instance object mapping
    @param feedback_fn: function used to accumulate results
    @rtype: boolean
    @return: True if any node cannot absorb the failovers of a peer

    """
    bad = False

    # the cluster object is loop-invariant; the original fetched it once
    # per instance inside the innermost loop
    cluster = self.cfg.GetClusterInfo()

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = cluster.FillBE(instance_cfg[instance])
          # only auto-balanced instances count towards N+1 requirements
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
949 2b3b6ddd Guido Trotter
950 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Turns the requested list of checks to skip into a set and verifies
    that every member is a valid optional check.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not self.skip_set.issubset(constants.VERIFY_OPTIONAL_CHECKS):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
960 a8083063 Iustin Pop
961 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags()),
      }
    # one NODE_TAGS_<name> variable per node in the cluster
    env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
               for node in self.cfg.GetAllNodesInfo().values())

    return env, [], all_nodes
976 d8fff41c Guido Trotter
977 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
978 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
979 a8083063 Iustin Pop

980 a8083063 Iustin Pop
    """
981 a8083063 Iustin Pop
    bad = False
982 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
983 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
984 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
985 a8083063 Iustin Pop
986 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
987 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
988 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
989 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
990 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
991 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
992 6d2e83d5 Iustin Pop
                        for iname in instancelist)
993 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
994 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
995 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
996 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
997 a8083063 Iustin Pop
    node_volume = {}
998 a8083063 Iustin Pop
    node_instance = {}
999 9c9c7d30 Guido Trotter
    node_info = {}
1000 26b6af5e Guido Trotter
    instance_cfg = {}
1001 a8083063 Iustin Pop
1002 a8083063 Iustin Pop
    # FIXME: verify OS list
1003 a8083063 Iustin Pop
    # do local checksums
1004 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
1005 112f18a5 Iustin Pop
1006 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
1007 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
1008 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
1009 112f18a5 Iustin Pop
    file_names.extend(master_files)
1010 112f18a5 Iustin Pop
1011 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
1012 a8083063 Iustin Pop
1013 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1014 a8083063 Iustin Pop
    node_verify_param = {
1015 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
1016 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
1017 82e37788 Iustin Pop
                              if not node.offline],
1018 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1019 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1020 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1021 82e37788 Iustin Pop
                                 if not node.offline],
1022 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1023 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1024 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1025 a8083063 Iustin Pop
      }
1026 cc9e1230 Guido Trotter
    if vg_name is not None:
1027 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1028 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1029 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
1030 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1031 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1032 a8083063 Iustin Pop
1033 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1034 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1035 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1036 6d2e83d5 Iustin Pop
1037 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1038 112f18a5 Iustin Pop
      node = node_i.name
1039 25361b9a Iustin Pop
1040 0a66c968 Iustin Pop
      if node_i.offline:
1041 0a66c968 Iustin Pop
        feedback_fn("* Skipping offline node %s" % (node,))
1042 0a66c968 Iustin Pop
        n_offline.append(node)
1043 0a66c968 Iustin Pop
        continue
1044 0a66c968 Iustin Pop
1045 112f18a5 Iustin Pop
      if node == master_node:
1046 25361b9a Iustin Pop
        ntype = "master"
1047 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1048 25361b9a Iustin Pop
        ntype = "master candidate"
1049 22f0f71d Iustin Pop
      elif node_i.drained:
1050 22f0f71d Iustin Pop
        ntype = "drained"
1051 22f0f71d Iustin Pop
        n_drained.append(node)
1052 112f18a5 Iustin Pop
      else:
1053 25361b9a Iustin Pop
        ntype = "regular"
1054 112f18a5 Iustin Pop
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1055 25361b9a Iustin Pop
1056 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1057 6f68a739 Iustin Pop
      if msg:
1058 6f68a739 Iustin Pop
        feedback_fn("  - ERROR: while contacting node %s: %s" % (node, msg))
1059 25361b9a Iustin Pop
        bad = True
1060 25361b9a Iustin Pop
        continue
1061 25361b9a Iustin Pop
1062 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1063 6d2e83d5 Iustin Pop
      node_drbd = {}
1064 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1065 c614e5fb Iustin Pop
        if instance not in instanceinfo:
1066 c614e5fb Iustin Pop
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
1067 c614e5fb Iustin Pop
                      instance)
1068 c614e5fb Iustin Pop
          # ghost instance should not be running, but otherwise we
1069 c614e5fb Iustin Pop
          # don't give double warnings (both ghost instance and
1070 c614e5fb Iustin Pop
          # unallocated minor in use)
1071 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1072 c614e5fb Iustin Pop
        else:
1073 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1074 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
1075 112f18a5 Iustin Pop
      result = self._VerifyNode(node_i, file_names, local_checksums,
1076 6d2e83d5 Iustin Pop
                                nresult, feedback_fn, master_files,
1077 cc9e1230 Guido Trotter
                                node_drbd, vg_name)
1078 a8083063 Iustin Pop
      bad = bad or result
1079 a8083063 Iustin Pop
1080 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1081 cc9e1230 Guido Trotter
      if vg_name is None:
1082 cc9e1230 Guido Trotter
        node_volume[node] = {}
1083 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1084 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
1085 26f15862 Iustin Pop
                    (node, utils.SafeEncode(lvdata)))
1086 b63ed789 Iustin Pop
        bad = True
1087 b63ed789 Iustin Pop
        node_volume[node] = {}
1088 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1089 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1090 a8083063 Iustin Pop
        bad = True
1091 a8083063 Iustin Pop
        continue
1092 b63ed789 Iustin Pop
      else:
1093 25361b9a Iustin Pop
        node_volume[node] = lvdata
1094 a8083063 Iustin Pop
1095 a8083063 Iustin Pop
      # node_instance
1096 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1097 25361b9a Iustin Pop
      if not isinstance(idata, list):
1098 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1099 25361b9a Iustin Pop
                    (node,))
1100 a8083063 Iustin Pop
        bad = True
1101 a8083063 Iustin Pop
        continue
1102 a8083063 Iustin Pop
1103 25361b9a Iustin Pop
      node_instance[node] = idata
1104 a8083063 Iustin Pop
1105 9c9c7d30 Guido Trotter
      # node_info
1106 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1107 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
1108 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1109 9c9c7d30 Guido Trotter
        bad = True
1110 9c9c7d30 Guido Trotter
        continue
1111 9c9c7d30 Guido Trotter
1112 9c9c7d30 Guido Trotter
      try:
1113 9c9c7d30 Guido Trotter
        node_info[node] = {
1114 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1115 93e4c50b Guido Trotter
          "pinst": [],
1116 93e4c50b Guido Trotter
          "sinst": [],
1117 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1118 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1119 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1120 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1121 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1122 36e7da50 Guido Trotter
          # secondary.
1123 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1124 9c9c7d30 Guido Trotter
        }
1125 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1126 cc9e1230 Guido Trotter
        if vg_name is not None:
1127 9a198532 Iustin Pop
          if (constants.NV_VGLIST not in nresult or
1128 9a198532 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST]):
1129 9a198532 Iustin Pop
            feedback_fn("  - ERROR: node %s didn't return data for the"
1130 9a198532 Iustin Pop
                        " volume group '%s' - it is either missing or broken" %
1131 9a198532 Iustin Pop
                        (node, vg_name))
1132 9a198532 Iustin Pop
            bad = True
1133 9a198532 Iustin Pop
            continue
1134 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1135 9a198532 Iustin Pop
      except (ValueError, KeyError):
1136 9a198532 Iustin Pop
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
1137 9a198532 Iustin Pop
                    " from node %s" % (node,))
1138 9c9c7d30 Guido Trotter
        bad = True
1139 9c9c7d30 Guido Trotter
        continue
1140 9c9c7d30 Guido Trotter
1141 a8083063 Iustin Pop
    node_vol_should = {}
1142 a8083063 Iustin Pop
1143 a8083063 Iustin Pop
    for instance in instancelist:
1144 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1145 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1146 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1147 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1148 c5705f58 Guido Trotter
      bad = bad or result
1149 832261fd Iustin Pop
      inst_nodes_offline = []
1150 a8083063 Iustin Pop
1151 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1152 a8083063 Iustin Pop
1153 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1154 26b6af5e Guido Trotter
1155 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1156 93e4c50b Guido Trotter
      if pnode in node_info:
1157 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1158 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1159 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1160 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1161 93e4c50b Guido Trotter
        bad = True
1162 93e4c50b Guido Trotter
1163 832261fd Iustin Pop
      if pnode in n_offline:
1164 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1165 832261fd Iustin Pop
1166 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1167 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1168 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1169 93e4c50b Guido Trotter
      # supported either.
1170 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1171 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1172 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1173 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1174 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1175 93e4c50b Guido Trotter
                    % instance)
1176 93e4c50b Guido Trotter
1177 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1178 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1179 3924700f Iustin Pop
1180 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1181 93e4c50b Guido Trotter
        if snode in node_info:
1182 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1183 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1184 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1185 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1186 0a66c968 Iustin Pop
        elif snode not in n_offline:
1187 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1188 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1189 832261fd Iustin Pop
          bad = True
1190 832261fd Iustin Pop
        if snode in n_offline:
1191 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1192 832261fd Iustin Pop
1193 832261fd Iustin Pop
      if inst_nodes_offline:
1194 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1195 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1196 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1197 832261fd Iustin Pop
        bad = True
1198 93e4c50b Guido Trotter
1199 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1200 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1201 a8083063 Iustin Pop
                                       feedback_fn)
1202 a8083063 Iustin Pop
    bad = bad or result
1203 a8083063 Iustin Pop
1204 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1205 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1206 a8083063 Iustin Pop
                                         feedback_fn)
1207 a8083063 Iustin Pop
    bad = bad or result
1208 a8083063 Iustin Pop
1209 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1210 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1211 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1212 e54c4c5e Guido Trotter
      bad = bad or result
1213 2b3b6ddd Guido Trotter
1214 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1215 2b3b6ddd Guido Trotter
    if i_non_redundant:
1216 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1217 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1218 2b3b6ddd Guido Trotter
1219 3924700f Iustin Pop
    if i_non_a_balanced:
1220 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1221 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1222 3924700f Iustin Pop
1223 0a66c968 Iustin Pop
    if n_offline:
1224 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1225 0a66c968 Iustin Pop
1226 22f0f71d Iustin Pop
    if n_drained:
1227 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1228 22f0f71d Iustin Pop
1229 34290825 Michael Hanselmann
    return not bad
1230 a8083063 Iustin Pop
1231 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          msg = res.fail_msg
          if msg:
            if res.offline:
              # no need to warn or set fail return value
              continue
            feedback_fn("    Communication failure in hooks execution: %s" %
                        msg)
            lu_result = 1
            continue
          for script, hkr, output in res.payload:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    """Acquire shared locks on all nodes and instances.

    """
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes

    """
    result = res_nodes, res_instances, res_missing = {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # only running, net-mirrored instances are of interest here
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      node_res = node_lvs[node]
      if node_res.offline:
        continue
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
        res_nodes[node] = msg
        continue

      lvs = node_res.payload
      for lv_name, (_, lv_inactive, lv_online) in lvs.items():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run on the master node only, both pre and post.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    The new name must resolve, and either the name or the IP address
    must actually change; a new IP must not already be reachable.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        msg = to_result.fail_msg
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
          self.proc.LogWarning(msg)

    finally:
      # always try to restart the master role, even if the rename failed
      result = self.rpc.call_node_start_master(master, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV


class LUSetClusterParams(LogicalUnit):
1466 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1467 8084f9f6 Manuel Franceschini

1468 8084f9f6 Manuel Franceschini
  """
1469 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1470 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1471 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1472 c53279cf Guido Trotter
  REQ_BGL = False
1473 c53279cf Guido Trotter
1474 3994f455 Iustin Pop
  def CheckArguments(self):
1475 4b7735f9 Iustin Pop
    """Check parameters
1476 4b7735f9 Iustin Pop

1477 4b7735f9 Iustin Pop
    """
1478 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
1479 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
1480 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1481 4b7735f9 Iustin Pop
      try:
1482 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1483 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
1484 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1485 4b7735f9 Iustin Pop
                                   str(err))
1486 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
1487 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed")
1488 4b7735f9 Iustin Pop
1489 c53279cf Guido Trotter
  def ExpandNames(self):
1490 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
1491 c53279cf Guido Trotter
    # all nodes to be modified.
1492 c53279cf Guido Trotter
    self.needed_locks = {
1493 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1494 c53279cf Guido Trotter
    }
1495 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1496 8084f9f6 Manuel Franceschini
1497 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1498 8084f9f6 Manuel Franceschini
    """Build hooks env.
1499 8084f9f6 Manuel Franceschini

1500 8084f9f6 Manuel Franceschini
    """
1501 8084f9f6 Manuel Franceschini
    env = {
1502 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1503 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1504 8084f9f6 Manuel Franceschini
      }
1505 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1506 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1507 8084f9f6 Manuel Franceschini
1508 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1509 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1510 8084f9f6 Manuel Franceschini

1511 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1512 5f83e263 Iustin Pop
    if the given volume group is valid.
1513 8084f9f6 Manuel Franceschini

1514 8084f9f6 Manuel Franceschini
    """
1515 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
1516 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1517 8084f9f6 Manuel Franceschini
      for inst in instances:
1518 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1519 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1520 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1521 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1522 8084f9f6 Manuel Franceschini
1523 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1524 779c15bb Iustin Pop
1525 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1526 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1527 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
1528 8084f9f6 Manuel Franceschini
      for node in node_list:
1529 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
1530 e480923b Iustin Pop
        if msg:
1531 781de953 Iustin Pop
          # ignoring down node
1532 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
1533 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
1534 781de953 Iustin Pop
          continue
1535 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
1536 781de953 Iustin Pop
                                              self.op.vg_name,
1537 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1538 8084f9f6 Manuel Franceschini
        if vgstatus:
1539 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1540 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1541 8084f9f6 Manuel Franceschini
1542 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
1543 5af3da74 Guido Trotter
    # validate params changes
1544 779c15bb Iustin Pop
    if self.op.beparams:
1545 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
1546 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
1547 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
1548 779c15bb Iustin Pop
1549 5af3da74 Guido Trotter
    if self.op.nicparams:
1550 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
1551 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
1552 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
1553 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
1554 5af3da74 Guido Trotter
1555 779c15bb Iustin Pop
    # hypervisor list/parameters
1556 abe609b2 Guido Trotter
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
1557 779c15bb Iustin Pop
    if self.op.hvparams:
1558 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
1559 779c15bb Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1560 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
1561 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
1562 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
1563 779c15bb Iustin Pop
        else:
1564 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
1565 779c15bb Iustin Pop
1566 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1567 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
1568 779c15bb Iustin Pop
    else:
1569 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
1570 779c15bb Iustin Pop
1571 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1572 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
1573 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
1574 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1575 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
1576 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
1577 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
1578 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
1579 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1580 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
1581 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
1582 779c15bb Iustin Pop
1583 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1584 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1585 8084f9f6 Manuel Franceschini

1586 8084f9f6 Manuel Franceschini
    """
1587 779c15bb Iustin Pop
    if self.op.vg_name is not None:
1588 b2482333 Guido Trotter
      new_volume = self.op.vg_name
1589 b2482333 Guido Trotter
      if not new_volume:
1590 b2482333 Guido Trotter
        new_volume = None
1591 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
1592 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
1593 779c15bb Iustin Pop
      else:
1594 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
1595 779c15bb Iustin Pop
                    " state, not changing")
1596 779c15bb Iustin Pop
    if self.op.hvparams:
1597 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
1598 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1599 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1600 779c15bb Iustin Pop
    if self.op.beparams:
1601 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
1602 5af3da74 Guido Trotter
    if self.op.nicparams:
1603 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
1604 5af3da74 Guido Trotter
1605 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1606 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1607 4b7735f9 Iustin Pop
1608 779c15bb Iustin Pop
    self.cfg.Update(self.cluster)
1609 8084f9f6 Manuel Franceschini
1610 4b7735f9 Iustin Pop
    # we want to update nodes after the cluster so that if any errors
1611 4b7735f9 Iustin Pop
    # happen, we have recorded and saved the cluster info
1612 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1613 ec0292f1 Iustin Pop
      _AdjustCandidatePool(self)
1614 4b7735f9 Iustin Pop
1615 8084f9f6 Manuel Franceschini
1616 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to

  """
  # Step 1: compute the target nodes (everyone except ourselves)
  master = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  nodes = lu.cfg.GetNodeList()
  if additional_nodes is not None:
    nodes.extend(additional_nodes)
  if master.name in nodes:
    nodes.remove(master.name)

  # Step 2: build the set of files that must be kept in sync, including
  # the ancillary files of every enabled hypervisor
  files = set([constants.ETC_HOSTS,
               constants.SSH_KNOWN_HOSTS_FILE,
               constants.RAPI_CERT_FILE,
               constants.RAPI_USERS_FILE,
              ])
  for hv_name in lu.cfg.GetClusterInfo().enabled_hypervisors:
    files.update(hypervisor.GetHypervisor(hv_name).GetAncillaryFiles())

  # Step 3: upload each file that exists locally, warning (not failing)
  # on per-node copy errors
  for fname in files:
    if not os.path.exists(fname):
      continue
    result = lu.rpc.call_upload_file(nodes, fname)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, to_node, msg))
        lu.proc.LogWarning(msg)
1656 28eddce5 Guido Trotter
1657 28eddce5 Guido Trotter
1658 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # all node locks, shared, since we only read and push files
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    # re-saving the (unmodified) cluster object makes ConfigWriter push
    # the config/ssconf files; the ancillary files are pushed explicitly
    self.cfg.Update(self.cfg.GetClusterInfo())
    _RedistributeAncillaryFiles(self)
1684 afee0879 Iustin Pop
1685 afee0879 Iustin Pop
1686 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Queries mirror status on the instance's primary node in a loop until
  all disks report no remaining sync work (or, with C{oneshot}, after
  a single stable status pass).

  @param lu: the calling logical unit, used for config access, RPC and
      user-visible logging
  @param instance: the instance whose disks are polled
  @param oneshot: if True, return after the current status instead of
      waiting for the sync to finish
  @param unlock: unused  # NOTE(review): this parameter is never read here
  @return: True if the disks are not (cumulatively) degraded

  """
  if not instance.disks:
    # no disks, nothing to wait for
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  # two independent retry counters: 'retries' for RPC-level failures,
  # 'degr_retries' for the done-but-degraded re-check below
  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    msg = rstats.fail_msg
    if msg:
      # RPC failed: retry up to 10 times (6s apart) before aborting
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    # a successful call resets the RPC retry counter
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # degraded with no sync progress reported counts as overall degraded
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a percentage means this device is still syncing
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          # NOTE(review): max_time keeps the *last* device's estimate,
          # not the maximum across devices, despite its name
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    # sleep until the last reported time estimate, capped at one minute
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1754 a8083063 Iustin Pop
1755 a8083063 Iustin Pop
1756 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1757 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1758 a8083063 Iustin Pop

1759 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1760 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1761 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1762 0834c866 Iustin Pop

1763 a8083063 Iustin Pop
  """
1764 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1765 0834c866 Iustin Pop
  if ldisk:
1766 0834c866 Iustin Pop
    idx = 6
1767 0834c866 Iustin Pop
  else:
1768 0834c866 Iustin Pop
    idx = 5
1769 a8083063 Iustin Pop
1770 a8083063 Iustin Pop
  result = True
1771 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1772 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1773 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
1774 23829f6f Iustin Pop
    if msg:
1775 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
1776 23829f6f Iustin Pop
      result = False
1777 23829f6f Iustin Pop
    elif not rstats.payload:
1778 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
1779 a8083063 Iustin Pop
      result = False
1780 a8083063 Iustin Pop
    else:
1781 23829f6f Iustin Pop
      result = result and (not rstats.payload[idx])
1782 a8083063 Iustin Pop
  if dev.children:
1783 a8083063 Iustin Pop
    for child in dev.children:
1784 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1785 a8083063 Iustin Pop
1786 a8083063 Iustin Pop
  return result
1787 a8083063 Iustin Pop
1788 a8083063 Iustin Pop
1789 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and tuples of (path, status, diagnose) as values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "")]}
          }

    """
    # nodes that answered at the RPC level; a non-responding node
    # daemon must not make every OS look invalid
    good_nodes = [nname for nname in rlist if not rlist[nname].fail_msg]

    all_os = {}
    for node_name, nres in rlist.items():
      if nres.fail_msg or not nres.payload:
        continue
      for name, path, status, diagnose in nres.payload:
        os_map = all_os.get(name)
        if os_map is None:
          # first occurrence of this OS: pre-seed an empty list for
          # every good node, so that absences remain visible
          os_map = all_os[name] = dict((nname, []) for nname in good_nodes)
        os_map[node_name].append((path, status, diagnose))
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    online_nodes = list(self.cfg.GetOnlineNodeList())
    node_data = self.rpc.call_os_diagnose(online_nodes)
    per_os = self._DiagnoseByOS(online_nodes, node_data)
    output = []
    for os_name, os_data in per_os.items():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # valid iff on every node the list is non-empty and the first
          # entry's status flag is true
          val = utils.all([osl and osl[0][1] for osl in os_data.values()])
        elif field == "node_status":
          # shallow copy of the per-node status map
          val = dict(os_data)
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)
    return output
1880 a8083063 Iustin Pop
1881 a8083063 Iustin Pop
1882 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    @return: (env dict, pre-phase node list, post-phase node list); the
        node being removed is excluded from both hook node lists

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # fixed: was the obsolete Python 2 statement form
      # "raise errors.OpPrereqError, (...)", inconsistent with the
      # call-form raises used everywhere else and invalid in Python 3
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    # refuse removal while any instance uses the node as primary or secondary
    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    # canonicalize the node name for Exec and the hooks environment
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    Drops the node from the configuration/context first, then asks the
    node itself to leave the cluster (best-effort: failures there only
    produce a warning), and finally rebalances the candidate pool.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    result = self.rpc.call_node_leave_cluster(node.name)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Errors encountered on the remote node while leaving"
                      " the cluster: %s", msg)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)
1953 eb1742d5 Guido Trotter
1954 a8083063 Iustin Pop
1955 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields that require a live query to the node daemons
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  # fields answerable purely from the configuration
  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    "drained",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    # self.wanted is either an explicit (validated) name list or ALL_SET
    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # live (dynamic) data is only queried when a non-static field was asked
    # for, and locking is only done when additionally requested by the caller
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted


  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      # with locking, the acquired locks define the node set
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      # without locking, explicitly-named nodes may have disappeared
      # between name expansion and now
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      # query the node daemons for the dynamic fields
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.fail_msg and nodeinfo.payload:
          nodeinfo = nodeinfo.payload
          fn = utils.TryConvert
          # best-effort conversion: missing/bad values become None
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      # NOTE: fromkeys shares one empty dict across all keys; it is
      # only ever read below, so the sharing is harmless
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    # the instance maps are only computed when an instance field was
    # requested, as this needs a scan over all instances
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    # build one output row per node, in the requested field order
    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif field == "drained":
          val = node.drained
        elif self._FIELDS_DYNAMIC.Matches(field):
          # dynamic fields default to None when live data is missing
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
2111 a8083063 Iustin Pop
2112 a8083063 Iustin Pop
2113 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    # lock either the named nodes or, lacking names, all of them
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in instances])

    output = []
    for node in nodenames:
      nresult = volumes[node]
      # offline nodes are skipped silently, failing ones with a warning
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      for vol in sorted(nresult.payload, key=lambda vol: vol['dev']):
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # map the LV back to the instance owning it, if any
            val = '-'
            for inst in instances:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
2197 dcb93971 Michael Hanselmann
2198 dcb93971 Michael Hanselmann
2199 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  Also handles re-adding a previously removed node when the opcode's
  C{readd} flag is set (see L{CheckPrereq} and L{Exec}).

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current node list, post-hooks also on the new node
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; also canonicalizes it and yields the primary IP
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # no secondary given: single-homed node, secondary == primary
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    # membership check: a fresh add must not exist yet, a readd must exist
    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # a readd must keep the exact IP configuration it had before
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      # neither of the new node's IPs may clash with any existing node's IPs
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # auto-promote to master candidate while the candidate pool is not full
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    mc_now, _ = self.cfg.GetMasterCandidateStats()
    master_candidate = mc_now < cp_size

    # the Node object to be committed by Exec()
    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip,
                                 master_candidate=master_candidate,
                                 offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    Verifies connectivity and protocol version, distributes the SSH
    host/user keys, updates /etc/hosts (if enabled), runs a node-verify
    from the master, and finally registers the node in the context.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host DSA/RSA key pairs plus the ganeti user's key pair, in the
    # positional order expected by call_node_add below
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])
    result.Raise("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      # dual-homed node: make sure it actually owns the secondary IP
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
                   prereq=True)
      if not result.payload:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # run a node-verify from the master against just the new node
    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      # payload['nodelist'] maps failed node names to the error message
      nl_payload = result[verifier].payload['nodelist']
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
    else:
      # new node also needs the ancillary files pushed to it explicitly
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
      self.context.AddNode(new_node)
2381 a8083063 Iustin Pop
2382 a8083063 Iustin Pop
2383 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  Supported flags are C{master_candidate}, C{offline} and C{drained};
  at most one of them may be set to True in a single invocation.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    # canonicalize the node name early so locks/hooks see the full name
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    # normalize the three flags to True/False/None (None = leave unchanged)
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      raise errors.OpPrereqError("Please pass at least one modification")
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    # demotion happens either explicitly (master_candidate=False) or
    # implicitly, by setting the node offline or drained
    if ((self.op.master_candidate == False or self.op.offline == True or
         self.op.drained == True) and node.master_candidate):
      # we will demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate, online and not drained")
      # don't drop below the configured candidate pool size unless forced
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    # promotion requires the node to end up online and not drained; the
    # "not ... == False" form accepts both None (unchanged) and True
    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name)

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    Returns a list of (parameter, new value) pairs describing the
    changes that were applied.

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        # an offline node cannot stay a master candidate or drained
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        # best-effort: tell the node itself to drop its candidate state
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        # a drained node cannot stay a master candidate or offline
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result
2508 b31c8676 Iustin Pop
2509 b31c8676 Iustin Pop
2510 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
  """Powercycles a node.

  """
  _OP_REQP = ["node_name", "force"]
  REQ_BGL = False

  def CheckArguments(self):
    # expand the (possibly shortened) node name; None means it's unknown
    expanded_name = self.cfg.ExpandNodeName(self.op.node_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = expanded_name
    # powercycling the master is only allowed with the force flag
    is_master = (expanded_name == self.cfg.GetMasterNode())
    if is_master and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set")

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resource option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This LU has no prereqs.

    """
    pass

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    hv_type = self.cfg.GetHypervisorType()
    result = self.rpc.call_node_powercycle(self.op.node_name, hv_type)
    result.Raise("Failed to schedule the reboot")
    return result.payload
2551 f5118ade Iustin Pop
2552 f5118ade Iustin Pop
2553 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    # only expose hvparams for the hypervisors actually enabled
    hv_params = dict([(hv_name, cluster.hvparams[hv_name])
                      for hv_name in cluster.enabled_hypervisors])
    info = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": hv_params,
      "beparams": cluster.beparams,
      "nicparams": cluster.nicparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "file_storage_dir": cluster.file_storage_dir,
      }
    return info
2596 a8083063 Iustin Pop
2597 a8083063 Iustin Pop
2598 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    # dispatch table: field name -> zero-argument callable producing it;
    # callables are invoked per request so drain_flag is always current
    getters = {
      "cluster_name": self.cfg.GetClusterName,
      "master_node": self.cfg.GetMasterNode,
      "drain_flag": lambda: os.path.exists(constants.JOB_QUEUE_DRAIN_FILE),
      }
    values = []
    for field in self.op.output_fields:
      if field not in getters:
        raise errors.ParameterError(field)
      values.append(getters[field]())
    return values
2636 a8083063 Iustin Pop
2637 a8083063 Iustin Pop
2638 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance
    # activating disks requires the primary node to be reachable
    _CheckNodeOnline(self, instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    ok, device_info = _AssembleInstanceDisks(self, self.instance)
    if not ok:
      raise errors.OpExecError("Cannot activate block devices")
    return device_info
2674 a8083063 Iustin Pop
2675 a8083063 Iustin Pop
2676 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: a tuple (disks_ok, device_info); disks_ok is False if the
      operation failed, and device_info is a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      # as_primary=False: bring the device up in secondary role everywhere
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      # as_primary=True: switch the primary node's device to primary role
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
    # result here is the last (primary-node) call for this disk
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        result.payload))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2743 a8083063 Iustin Pop
2744 a8083063 Iustin Pop
2745 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  Assembles the instance's disks; on failure the partially-activated
  disks are torn down again before an error is raised.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                       ignore_secondaries=force)
  if disks_ok:
    return
  # assembly failed: undo whatever was activated before bailing out
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.proc.LogWarning("", hint="If the message above refers to a"
                       " secondary node,"
                       " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2758 fe7b0351 Michael Hanselmann
2759 fe7b0351 Michael Hanselmann
2760 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  # opcode attributes that must be present
  _OP_REQP = ["instance_name"]
  # this LU declares its own locks in ExpandNames
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are filled in later, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    # the "safe" variant refuses to act if the instance is still running
    _SafeShutdownInstanceDisks(self, instance)
2792 a8083063 Iustin Pop
2793 a8083063 Iustin Pop
2794 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  Refuses to act if the instance is reported as running on its primary
  node; otherwise delegates to _ShutdownInstanceDisks.

  """
  primary = instance.primary_node
  running = lu.rpc.call_instance_list([primary],
                                      [instance.hypervisor])[primary]
  running.Raise("Can't contact node %s" % primary)
  if instance.name in running.payload:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")
  _ShutdownInstanceDisks(lu, instance)
2810 a8083063 Iustin Pop
2811 a8083063 Iustin Pop
2812 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored
  when computing the return value (the shutdown is still attempted
  on every node).

  @return: True if all block devices were shut down cleanly (modulo
      ignore_primary), False otherwise

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        # a failure on the primary node only affects the result if we
        # were not asked to ignore it
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result
2833 a8083063 Iustin Pop
2834 a8083063 Iustin Pop
2835 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  nodeinfo[node].Raise("Can't get data from node %s" % node, prereq=True)
  # 'memory_free' may be missing from the payload; the None default
  # fails the isinstance check below and produces a clear error
  free_mem = nodeinfo[node].payload.get('memory_free', None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))
2867 d4f16fd9 Iustin Pop
2868 d4f16fd9 Iustin Pop
2869 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  Optional "beparams"/"hvparams" dicts on the opcode are validated in
  CheckPrereq and passed through to the node's instance-start RPC.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra beparams (optional, taken from the opcode if present)
    self.beparams = getattr(self.op, "beparams", {})
    if self.beparams:
      if not isinstance(self.beparams, dict):
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
                                   " dict" % (type(self.beparams), ))
      # fill the beparams dict
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
      self.op.beparams = self.beparams

    # extra hvparams (optional, taken from the opcode if present)
    self.hvparams = getattr(self.op, "hvparams", {})
    if self.hvparams:
      if not isinstance(self.hvparams, dict):
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
                                   " dict" % (type(self.hvparams), ))

      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
      # layer the opcode overrides on top of the cluster- and
      # instance-level hypervisor parameters before validating
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
                                    instance.hvparams)
      filled_hvp.update(self.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
      self.op.hvparams = self.hvparams

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridge existence
    _CheckInstanceBridgesExist(self, instance)

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    if not remote_info.payload: # not running already
      # only check free memory if the instance actually needs starting
      _CheckNodeFreeMemory(self, instance.primary_node,
                           "starting instance %s" % instance.name,
                           bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    # the desired state is recorded before the start is attempted;
    # NOTE(review): presumably intentional so the cluster considers the
    # instance "up" even if the RPC below fails — confirm
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance,
                                          self.hvparams, self.beparams)
    msg = result.fail_msg
    if msg:
      # roll back the disk activation before reporting the failure
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)
2967 a8083063 Iustin Pop
2968 a8083063 Iustin Pop
2969 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    # validate the reboot type before taking any locks
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboots are delegated to the node daemon; the disk
      # IDs are refreshed for the primary node first
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type)
      result.Raise("Could not reboot instance")
    else:
      # full reboot: stop the instance, cycle its disks, start it again
      result = self.rpc.call_instance_shutdown(node_current, instance)
      result.Raise("Could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.fail_msg
      if msg:
        # clean up the activated disks if the restart failed
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)
3047 bf6929a2 Alexander Schreiber
3048 bf6929a2 Alexander Schreiber
3049 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    # the desired state is recorded before contacting the node
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance)
    msg = result.fail_msg
    if msg:
      # best-effort: log the failure but still try to release the disks
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)

    _ShutdownInstanceDisks(self, instance)
3095 a8083063 Iustin Pop
3096 a8083063 Iustin Pop
3097 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  Optionally switches the instance to a new OS, then re-runs the OS
  creation scripts on the primary node. The instance must be marked
  down and must not actually be running.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked down the instance could still be live on its
    # primary node, so double-check via RPC
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # fixed: this opcode has no "pnode" attribute, so the previous
        # "self.op.pnode" here raised AttributeError instead of the
        # intended OpPrereqError
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise("OS '%s' not in supported OS list for primary node %s" %
                   (self.op.os_type, pnode.name), prereq=True)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      # third argument: presumably the "reinstall" flag of the RPC —
      # TODO(review): confirm against call_instance_os_add's signature
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks, even if the OS scripts failed
      _ShutdownInstanceDisks(self, inst)
3179 fe7b0351 Michael Hanselmann
3180 fe7b0351 Michael Hanselmann
3181 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]
  # note: no ExpandNames/REQ_BGL here, so this LU runs with the big
  # lock held (see the lock-swap comment in Exec below)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked down the instance could still be live on its
    # primary node, so double-check via RPC
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      # refuse the rename if something already answers on the new
      # name's IP at the node daemon port
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    # remember the old file storage path before the config is updated
    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      # file-based instances also need their storage directory moved
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.fail_msg
      if msg:
        # a failing OS rename script is only a warning: the cluster-side
        # rename has already happened
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
3282 decd5f45 Iustin Pop
3283 decd5f45 Iustin Pop
3284 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance from the cluster.

  Shuts the instance down on its primary node, removes its disks and
  finally drops it from the cluster configuration. With
  ``ignore_failures`` set, shutdown/disk-removal errors are reported as
  warnings instead of aborting the removal.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    # Lock the instance itself; its node locks are computed later from
    # the instance's node list (see DeclareLocks).
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # Replace the (still empty) node lock list with the nodes actually
    # used by the locked instance.
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    hook_env = _BuildInstanceHookEnvByObject(self, self.instance)
    run_nodes = [self.cfg.GetMasterNode()]
    return hook_env, run_nodes, run_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    inst = self.instance
    ignore_failures = self.op.ignore_failures

    logging.info("Shutting down instance %s on node %s",
                 inst.name, inst.primary_node)

    shutdown_result = self.rpc.call_instance_shutdown(inst.primary_node, inst)
    shutdown_msg = shutdown_result.fail_msg
    if shutdown_msg and not ignore_failures:
      raise errors.OpExecError("Could not shutdown instance %s on"
                               " node %s: %s" %
                               (inst.name, inst.primary_node, shutdown_msg))
    elif shutdown_msg:
      # best-effort mode: report and continue with the removal
      feedback_fn("Warning: can't shutdown instance: %s" % shutdown_msg)

    logging.info("Removing block devices for instance %s", inst.name)

    if not _RemoveDisks(self, inst):
      if not ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", inst.name)

    self.cfg.RemoveInstance(inst.name)
    # drop the instance lock once this LU is done, since the instance
    # no longer exists
    self.remove_locks[locking.LEVEL_INSTANCE] = inst.name
3352 a8083063 Iustin Pop
3353 a8083063 Iustin Pop
3354 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  Static fields are answered from the configuration; dynamic fields
  ("oper_state", "oper_ram", "status") require querying the nodes via
  RPC, which in turn (optionally, per ``use_locking``) requires locking.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "nic_mode", "nic_link",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
                                    r"(nic)\.(bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")


  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # queries are read-only, so shared locks suffice
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # node queries (and hence locking) are only needed for dynamic fields
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    @param feedback_fn: function used to report progress (unused here)
    @return: a list (one entry per instance) of lists of field values,
        in the order given by self.op.output_fields

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed or result.fail_msg:
          bad_nodes.append(name)
        else:
          if result.payload:
            live_data.update(result.payload)
          # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    cluster = self.cfg.GetClusterInfo()
    for instance in instance_list:
      iout = []
      # per-instance effective hypervisor/backend/NIC parameters
      # (instance overrides merged over the cluster defaults)
      i_hv = cluster.FillHV(instance)
      i_be = cluster.FillBE(instance)
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
                                 nic.nicparams) for nic in instance.nics]
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "vcpus":
          # FIX: "vcpus" is advertised in _FIELDS_STATIC but previously
          # had no branch here, so querying it fell through to
          # errors.ParameterError; serve it from the backend parameters
          val = i_be[constants.BE_VCPUS]
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          if instance.nics:
            val = instance.nics[0].ip
          else:
            val = None
        elif field == "nic_mode":
          if instance.nics:
            val = i_nicp[0][constants.NIC_MODE]
          else:
            val = None
        elif field == "nic_link":
          if instance.nics:
            val = i_nicp[0][constants.NIC_LINK]
          else:
            val = None
        elif field == "bridge":
          # only meaningful for bridged NICs; otherwise report None
          if (instance.nics and
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
            val = i_nicp[0][constants.NIC_LINK]
          else:
            val = None
        elif field == "mac":
          if instance.nics:
            val = instance.nics[0].mac
          else:
            val = None
        elif field == "sda_size" or field == "sdb_size":
          # legacy names: sda/sdb map to disk indices 0/1
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list, e.g. "disk.size/2" or "nic.macs"
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "modes":
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
            elif st_groups[1] == "links":
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
            elif st_groups[1] == "bridges":
              val = []
              for nicp in i_nicp:
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
                  val.append(nicp[constants.NIC_LINK])
                else:
                  val.append(None)
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "mode":
                  val = i_nicp[nic_idx][constants.NIC_MODE]
                elif st_groups[1] == "link":
                  val = i_nicp[nic_idx][constants.NIC_LINK]
                elif st_groups[1] == "bridge":
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
                  if nic_mode == constants.NIC_MODE_BRIDGED:
                    val = i_nicp[nic_idx][constants.NIC_LINK]
                  else:
                    val = None
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
3632 a8083063 Iustin Pop
3633 a8083063 Iustin Pop
3634 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  Shuts the instance down on its primary node and (if it was marked as
  up) starts it on its first secondary node.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance; node locks are derived from it in DeclareLocks
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a network-
    mirrored disk template, and that the failover target node is online,
    not drained and (if the instance is up) has enough free memory.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # failover only makes sense when the disks exist on both nodes
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    @param feedback_fn: function used to report per-step progress

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # degraded disks only abort the failover for a running instance,
        # and only if the user didn't explicitly override the check
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        # proceed anyway; the user asserted the source node is unusable
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # from here on the secondary becomes the new primary
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        # clean up partially-assembled disks before aborting
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
3764 a8083063 Iustin Pop
3765 a8083063 Iustin Pop
3766 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  Only DRBD8-based instances are supported (see CheckPrereq); the
  migration target is always the (single) secondary node.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters: instance to move, whether to do a live
  # migration, and whether this run only cleans up a failed migration
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance plus (via LOCKS_REPLACE) exactly its own nodes,
    # which are computed later in DeclareLocks
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["MIGRATE_LIVE"] = self.op.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses drbd8, has a
    secondary node, that the target node has enough free memory, has
    the needed bridges, and (for a real migration, not a cleanup) that
    the hypervisor agrees the instance is migratable.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # migration works by flipping DRBD primaries, so only drbd8 works
    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      # drbd8 instances must always have a secondary; this is a
      # configuration error, not an operator error
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existance
    _CheckInstanceBridgesExist(self, instance, node=target_node)

    if not self.op.cleanup:
      # a cleanup run must be allowed even on a drained node, hence the
      # drain check only applies to a real migration
      _CheckNodeNotDrained(self, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      result.Raise("Can't migrate, please use failover", prereq=True)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    Loops until every node reports its disks as fully synced,
    reporting the progress of the slowest node in between.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      # min_percent tracks the least-synced node, for progress display
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    Closes the instance's block devices on the given node, which for
    DRBD demotes it to the secondary role.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    Puts the DRBD devices on all involved nodes into standalone
    (disconnected) mode.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    @param multimaster: if True, attach the disks in dual-master
        (primary/primary) mode, as needed during the live migration;
        otherwise in the normal single-master mode

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise("Can't contact node %s" % node)

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      # running on both nodes at once is unrecoverable automatically;
      # require manual intervention before retrying
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore here errors, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    Best effort: a failure here is only logged, since there is nothing
    more we can do automatically at this point.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    # finalize_migration with success=False tells the target node to
    # abort/clean up the half-started instance
    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.fail_msg
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.fail_msg
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    # NOTE(review): the 10s sleeps around the migrate call look like
    # grace periods for the hypervisor; no SOURCE comment explains the
    # exact value -- confirm before changing
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.fail_msg
    if msg:
      # the instance is already running on the target at this point, so
      # we only report; reverting would do more harm than good
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    Dispatches to either the cleanup of a previously-failed migration
    or to the actual migration, based on the 'cleanup' parameter.

    """
    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    # per-node secondary IPs: presumably the DRBD replication network
    # addresses -- confirm against the rpc helpers' expectations
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
    if self.op.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()
4113 53c776b5 Iustin Pop
4114 53c776b5 Iustin Pop
4115 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
4116 428958aa Iustin Pop
                    info, force_open):
4117 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
4118 a8083063 Iustin Pop

4119 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
4120 a8083063 Iustin Pop
  all its children.
4121 a8083063 Iustin Pop

4122 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
4123 a8083063 Iustin Pop

4124 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
4125 428958aa Iustin Pop
  @param node: the node on which to create the device
4126 428958aa Iustin Pop
  @type instance: L{objects.Instance}
4127 428958aa Iustin Pop
  @param instance: the instance which owns the device
4128 428958aa Iustin Pop
  @type device: L{objects.Disk}
4129 428958aa Iustin Pop
  @param device: the device to create
4130 428958aa Iustin Pop
  @type force_create: boolean
4131 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
4132 428958aa Iustin Pop
      will be change to True whenever we find a device which has
4133 428958aa Iustin Pop
      CreateOnSecondary() attribute
4134 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4135 428958aa Iustin Pop
      (this will be represented as a LVM tag)
4136 428958aa Iustin Pop
  @type force_open: boolean
4137 428958aa Iustin Pop
  @param force_open: this parameter will be passes to the
4138 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4139 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
4140 428958aa Iustin Pop
      the child assembly and the device own Open() execution
4141 428958aa Iustin Pop

4142 a8083063 Iustin Pop
  """
4143 a8083063 Iustin Pop
  if device.CreateOnSecondary():
4144 428958aa Iustin Pop
    force_create = True
4145 796cab27 Iustin Pop
4146 a8083063 Iustin Pop
  if device.children:
4147 a8083063 Iustin Pop
    for child in device.children:
4148 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
4149 428958aa Iustin Pop
                      info, force_open)
4150 a8083063 Iustin Pop
4151 428958aa Iustin Pop
  if not force_create:
4152 796cab27 Iustin Pop
    return
4153 796cab27 Iustin Pop
4154 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
4155 de12473a Iustin Pop
4156 de12473a Iustin Pop
4157 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
4158 de12473a Iustin Pop
  """Create a single block device on a given node.
4159 de12473a Iustin Pop

4160 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
4161 de12473a Iustin Pop
  created in advance.
4162 de12473a Iustin Pop

4163 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
4164 de12473a Iustin Pop
  @param node: the node on which to create the device
4165 de12473a Iustin Pop
  @type instance: L{objects.Instance}
4166 de12473a Iustin Pop
  @param instance: the instance which owns the device
4167 de12473a Iustin Pop
  @type device: L{objects.Disk}
4168 de12473a Iustin Pop
  @param device: the device to create
4169 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4170 de12473a Iustin Pop
      (this will be represented as a LVM tag)
4171 de12473a Iustin Pop
  @type force_open: boolean
4172 de12473a Iustin Pop
  @param force_open: this parameter will be passes to the
4173 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4174 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
4175 de12473a Iustin Pop
      the child assembly and the device own Open() execution
4176 de12473a Iustin Pop

4177 de12473a Iustin Pop
  """
4178 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
4179 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
4180 428958aa Iustin Pop
                                       instance.name, force_open, info)
4181 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
4182 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
4183 a8083063 Iustin Pop
  if device.physical_id is None:
4184 0959c824 Iustin Pop
    device.physical_id = result.payload
4185 a8083063 Iustin Pop
4186 a8083063 Iustin Pop
4187 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
4188 923b1523 Iustin Pop
  """Generate a suitable LV name.
4189 923b1523 Iustin Pop

4190 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
4191 923b1523 Iustin Pop

4192 923b1523 Iustin Pop
  """
4193 923b1523 Iustin Pop
  results = []
4194 923b1523 Iustin Pop
  for val in exts:
4195 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
4196 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
4197 923b1523 Iustin Pop
  return results
4198 923b1523 Iustin Pop
4199 923b1523 Iustin Pop
4200 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  Builds the data LV (names[0], full size) and the metadata LV
  (names[1], fixed size 128) and wraps them in a DRBD8 disk whose
  logical id ties together the two nodes, the allocated port, the
  per-node minors and a freshly generated shared secret.

  """
  # cluster-wide resources allocated for the new device
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port,
                                  p_minor, s_minor,
                                  shared_secret),
                      children=[dev_data, dev_meta],
                      iv_name=iv_name)
4219 a1f445d3 Iustin Pop
4220 7c0d6283 Michael Hanselmann
4221 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  @param lu: the lu on whose behalf we execute
  @param template_name: one of the constants.DT_* disk templates
  @param instance_name: the name of the instance owning the disks
  @param primary_node: the instance's primary node
  @param secondary_nodes: must be empty for plain/file templates and
      contain exactly one node for drbd8
  @param disk_info: list of dicts, each with "size" and "mode" keys
  @param file_storage_dir: base directory for file-backed disks
      (only used for the file template)
  @param file_driver: driver for file-backed disks (only used for the
      file template)
  @param base_index: offset added to the per-disk index when building
      the "disk/N" iv_name (used when adding disks to an existing
      instance)
  @return: list of L{objects.Disk} objects (empty for diskless)
  @raise errors.ProgrammerError: if the secondary node count does not
      match the template, or the template is unknown

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    # one LV per disk, named <unique-id>.disk<N>
    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # two minors per disk: one on the primary, one on the secondary
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    # each disk needs a data LV and a meta LV, hence two names per disk
    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
4285 a8083063 Iustin Pop
4286 a8083063 Iustin Pop
4287 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
4288 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
4289 3ecf6786 Iustin Pop

4290 3ecf6786 Iustin Pop
  """
4291 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
4292 a0c3fea1 Michael Hanselmann
4293 a0c3fea1 Michael Hanselmann
4294 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    # FIX: the format string used to have three '%s' placeholders for only
    # two arguments; since '%' is applied eagerly (before Raise is called),
    # every file-based disk creation died with a TypeError here.
    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir, pnode))

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in instance.all_nodes:
      # only the primary node gets the device opened for writing at creation
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
4326 a8083063 Iustin Pop
4327 a8083063 Iustin Pop
4328 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  success = True
  for device in instance.disks:
    # walk the whole device tree (e.g. drbd plus its lvm children)
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      err_msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
      if err_msg:
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", device.iv_name, node, err_msg)
        success = False

  if instance.disk_template == constants.DT_FILE:
    # file-based instances also need their storage directory removed
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                 file_storage_dir)
    err_msg = result.fail_msg
    if err_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, instance.primary_node, err_msg)
      success = False

  return success
4367 a8083063 Iustin Pop
4368 a8083063 Iustin Pop
4369 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  @param disk_template: one of the L{constants.DISK_TEMPLATES} values
  @param disks: list of disk dicts, each with a "size" entry (in MiB)
  @return: required free space in the volume group, or None for
      templates that do not consume LVM space
  @raise errors.ProgrammerError: for an unknown disk template

  """
  # Required free disk space as a function of disk and swap space
  if disk_template == constants.DT_DISKLESS:
    return None
  elif disk_template == constants.DT_PLAIN:
    return sum(d["size"] for d in disks)
  elif disk_template == constants.DT_DRBD8:
    # 128 MB are added for drbd metadata for each disk
    return sum(d["size"] + 128 for d in disks)
  elif disk_template == constants.DT_FILE:
    return None
  else:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" %  disk_template)
4387 e2fe6369 Iustin Pop
4388 e2fe6369 Iustin Pop
4389 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstract the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvparams)
  for nodename in nodenames:
    node_result = hvinfo[nodename]
    # offline nodes cannot answer, so they are not an error
    if node_result.offline:
      continue
    node_result.Raise("Hypervisor parameter validation failed on node %s" %
                      nodename)
4414 74409b12 Iustin Pop
4415 74409b12 Iustin Pop
4416 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
4417 a8083063 Iustin Pop
  """Create an instance.
4418 a8083063 Iustin Pop

4419 a8083063 Iustin Pop
  """
4420 a8083063 Iustin Pop
  HPATH = "instance-add"
4421 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4422 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
4423 08db7c5c Iustin Pop
              "mode", "start",
4424 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
4425 338e51e8 Iustin Pop
              "hvparams", "beparams"]
4426 7baf741d Guido Trotter
  REQ_BGL = False
4427 7baf741d Guido Trotter
4428 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Expands and checks one node name.

    @param node: a (possibly short) node name
    @return: the full node name as known to the configuration
    @raise errors.OpPrereqError: if the node cannot be resolved

    """
    expanded = self.cfg.ExpandNodeName(node)
    if expanded is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return expanded
4436 7baf741d Guido Trotter
4437 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    Performs all checks that can be done without cluster-wide locks:
    opcode constant validation, hypervisor/backend parameter syntax,
    instance name resolution, NIC and disk pre-building, and the
    node/iallocator lock computation (including the import source node).

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # default to the cluster-wide hypervisor when none was requested
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    # kept for BuildHooksEnv and later phases
    self.hv_full = filled_hvp

    # fill and remember the beparams dict
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup
    self.nics = []
    for idx, nic in enumerate(self.op.nics):
      nic_mode_req = nic.get("mode", None)
      nic_mode = nic_mode_req
      if nic_mode is None:
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

      # in routed mode, for the first nic, the default ip is 'auto'
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
        default_ip_mode = constants.VALUE_AUTO
      else:
        default_ip_mode = constants.VALUE_NONE

      # ip validity checks
      ip = nic.get("ip", default_ip_mode)
      if ip is None or ip.lower() == constants.VALUE_NONE:
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        # 'auto' means: reuse the instance's own resolved address
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # TODO: check the ip for uniqueness !!
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
        raise errors.OpPrereqError("Routed nic mode requires an ip address")

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
      # bridge verification
      # 'bridge' is the legacy spelling of 'link'; they are mutually
      # exclusive and bridge is translated into link below
      bridge = nic.get("bridge", None)
      link = nic.get("link", None)
      if bridge and link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link' at the same time")
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic")
      elif bridge:
        link = bridge

      nicparams = {}
      if nic_mode_req:
        nicparams[constants.NIC_MODE] = nic_mode_req
      if link:
        nicparams[constants.NIC_LINK] = link

      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
                                      nicparams)
      objects.NIC.CheckParameterSyntax(check_params)
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator may pick any node, so we must lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        # without a source node we must search all nodes for the export,
        # hence the ALL_SET lock; the real node is found in CheckPrereq
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          # relative paths are taken to be below the cluster export dir
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
4624 a8083063 Iustin Pop
4625 538475ca Iustin Pop
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    On success this sets self.op.pnode (and self.op.snode when the
    allocation requires two nodes) from the allocator's answer.

    """
    req = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=self.disks,
                     nics=[nic.ToDict() for nic in self.nics],
                     hypervisor=self.op.hypervisor,
                     )

    req.Run(self.op.iallocator)

    if not req.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           req.info))
    if len(req.nodes) != req.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(req.nodes),
                                  req.required_nodes))
    self.op.pnode = req.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(req.nodes))
    if req.required_nodes == 2:
      self.op.snode = req.nodes[1]
4660 538475ca Iustin Pop
4661 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"ADD_MODE": self.op.mode}
    if self.op.mode == constants.INSTANCE_IMPORT:
      # import-specific variables for the hooks scripts
      env.update({
        "SRC_NODE": self.op.src_node,
        "SRC_PATH": self.op.src_path,
        "SRC_IMAGES": self.src_images,
        })

    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=_NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d["size"], d["mode"]) for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor=self.op.hypervisor,
    ))

    # the hooks run on the master plus all nodes of the new instance
    node_list = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return env, node_list, node_list
4694 a8083063 Iustin Pop
4695 a8083063 Iustin Pop
4696 a8083063 Iustin Pop
  def CheckPrereq(self):
4697 a8083063 Iustin Pop
    """Check prerequisites.
4698 a8083063 Iustin Pop

4699 a8083063 Iustin Pop
    """
4700 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
4701 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
4702 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
4703 eedc99de Manuel Franceschini
                                 " instances")
4704 eedc99de Manuel Franceschini
4705 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
4706 7baf741d Guido Trotter
      src_node = self.op.src_node
4707 7baf741d Guido Trotter
      src_path = self.op.src_path
4708 a8083063 Iustin Pop
4709 c0cbdc67 Guido Trotter
      if src_node is None:
4710 1b7bfbb7 Iustin Pop
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
4711 1b7bfbb7 Iustin Pop
        exp_list = self.rpc.call_export_list(locked_nodes)
4712 c0cbdc67 Guido Trotter
        found = False
4713 c0cbdc67 Guido Trotter
        for node in exp_list:
4714 4c4e4e1e Iustin Pop
          if exp_list[node].fail_msg:
4715 1b7bfbb7 Iustin Pop
            continue
4716 1b7bfbb7 Iustin Pop
          if src_path in exp_list[node].payload:
4717 c0cbdc67 Guido Trotter
            found = True
4718 c0cbdc67 Guido Trotter
            self.op.src_node = src_node = node
4719 c0cbdc67 Guido Trotter
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
4720 c0cbdc67 Guido Trotter
                                                       src_path)
4721 c0cbdc67 Guido Trotter
            break
4722 c0cbdc67 Guido Trotter
        if not found:
4723 c0cbdc67 Guido Trotter
          raise errors.OpPrereqError("No export found for relative path %s" %
4724 c0cbdc67 Guido Trotter
                                      src_path)
4725 c0cbdc67 Guido Trotter
4726 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, src_node)
4727 781de953 Iustin Pop
      result = self.rpc.call_export_info(src_node, src_path)
4728 4c4e4e1e Iustin Pop
      result.Raise("No export or invalid export found in dir %s" % src_path)
4729 a8083063 Iustin Pop
4730 3eccac06 Iustin Pop
      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
4731 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
4732 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
4733 a8083063 Iustin Pop
4734 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
4735 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
4736 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
4737 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
4738 a8083063 Iustin Pop
4739 09acf207 Guido Trotter
      # Check that the new instance doesn't have less disks than the export
4740 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
4741 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
4742 09acf207 Guido Trotter
      if instance_disks < export_disks:
4743 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
4744 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
4745 726d7d68 Iustin Pop
                                   (instance_disks, export_disks))
4746 a8083063 Iustin Pop
4747 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
4748 09acf207 Guido Trotter
      disk_images = []
4749 09acf207 Guido Trotter
      for idx in range(export_disks):
4750 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
4751 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
4752 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
4753 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
4754 09acf207 Guido Trotter
          image = os.path.join(src_path, export_name)
4755 09acf207 Guido Trotter
          disk_images.append(image)
4756 09acf207 Guido Trotter
        else:
4757 09acf207 Guido Trotter
          disk_images.append(False)
4758 09acf207 Guido Trotter
4759 09acf207 Guido Trotter
      self.src_images = disk_images
4760 901a65c1 Iustin Pop
4761 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
4762 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
4763 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
4764 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
4765 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
4766 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
4767 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
4768 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
4769 bc89efc3 Guido Trotter
4770 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
4771 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
4772 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
4773 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
4774 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
4775 901a65c1 Iustin Pop
4776 901a65c1 Iustin Pop
    if self.op.ip_check:
4777 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
4778 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4779 7b3a8fb5 Iustin Pop
                                   (self.check_ip, self.op.instance_name))
4780 901a65c1 Iustin Pop
4781 295728df Guido Trotter
    #### mac address generation
4782 295728df Guido Trotter
    # By generating here the mac address both the allocator and the hooks get
4783 295728df Guido Trotter
    # the real final mac address rather than the 'auto' or 'generate' value.
4784 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
4785 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
4786 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
4787 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
4788 295728df Guido Trotter
    # creation job will fail.
4789 295728df Guido Trotter
    for nic in self.nics:
4790 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4791 295728df Guido Trotter
        nic.mac = self.cfg.GenerateMAC()
4792 295728df Guido Trotter
4793 538475ca Iustin Pop
    #### allocator run
4794 538475ca Iustin Pop
4795 538475ca Iustin Pop
    if self.op.iallocator is not None:
4796 538475ca Iustin Pop
      self._RunAllocator()
4797 0f1a06e3 Manuel Franceschini
4798 901a65c1 Iustin Pop
    #### node related checks
4799 901a65c1 Iustin Pop
4800 901a65c1 Iustin Pop
    # check primary node
4801 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
4802 7baf741d Guido Trotter
    assert self.pnode is not None, \
4803 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
4804 7527a8a4 Iustin Pop
    if pnode.offline:
4805 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
4806 7527a8a4 Iustin Pop
                                 pnode.name)
4807 733a2b6a Iustin Pop
    if pnode.drained:
4808 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
4809 733a2b6a Iustin Pop
                                 pnode.name)
4810 7527a8a4 Iustin Pop
4811 901a65c1 Iustin Pop
    self.secondaries = []
4812 901a65c1 Iustin Pop
4813 901a65c1 Iustin Pop
    # mirror node verification
4814 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
4815 7baf741d Guido Trotter
      if self.op.snode is None:
4816 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
4817 3ecf6786 Iustin Pop
                                   " a mirror node")
4818 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
4819 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
4820 3ecf6786 Iustin Pop
                                   " the primary node.")
4821 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
4822 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
4823 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
4824 a8083063 Iustin Pop
4825 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
4826 6785674e Iustin Pop
4827 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
4828 08db7c5c Iustin Pop
                                self.disks)
4829 ed1ebc60 Guido Trotter
4830 8d75db10 Iustin Pop
    # Check lv size requirements
4831 8d75db10 Iustin Pop
    if req_size is not None:
4832 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4833 72737a7f Iustin Pop
                                         self.op.hypervisor)
4834 8d75db10 Iustin Pop
      for node in nodenames:
4835 781de953 Iustin Pop
        info = nodeinfo[node]
4836 4c4e4e1e Iustin Pop
        info.Raise("Cannot get current information from node %s" % node)
4837 070e998b Iustin Pop
        info = info.payload
4838 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
4839 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
4840 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
4841 8d75db10 Iustin Pop
                                     " node %s" % node)
4842 070e998b Iustin Pop
        if req_size > vg_free:
4843 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
4844 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
4845 070e998b Iustin Pop
                                     (node, vg_free, req_size))
4846 ed1ebc60 Guido Trotter
4847 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
4848 6785674e Iustin Pop
4849 a8083063 Iustin Pop
    # os verification
4850 781de953 Iustin Pop
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
4851 4c4e4e1e Iustin Pop
    result.Raise("OS '%s' not in supported os list for primary node %s" %
4852 4c4e4e1e Iustin Pop
                 (self.op.os_type, pnode.name), prereq=True)
4853 a8083063 Iustin Pop
4854 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
4855 a8083063 Iustin Pop
4856 49ce1563 Iustin Pop
    # memory check on primary node
4857 49ce1563 Iustin Pop
    if self.op.start:
4858 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
4859 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
4860 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
4861 338e51e8 Iustin Pop
                           self.op.hypervisor)
4862 49ce1563 Iustin Pop
4863 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the instance's disks, adds the instance to the
    configuration, releases the node locks that are no longer needed,
    optionally waits for the disks to sync, runs the OS
    create/import scripts and finally starts the instance if so
    requested. feedback_fn is a callable used to send progress
    messages back to the job submitter.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # allocate a cluster-wide network port if this hypervisor type
    # requires one
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    # the instance starts out administratively down (admin_up=False);
    # it is only marked up and started at the end, if self.op.start
    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    try:
      _CreateDisks(self, iobj)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, iobj)
      finally:
        # release the reserved DRBD minors whether or not the disk
        # removal itself succeeded, then re-raise the original error
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Unlock all the nodes
    if self.op.mode == constants.INSTANCE_IMPORT:
      # keep the lock on the source node, since the import scripts
      # below still read the export data from it
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # roll back: drop both the disks and the configuration entry
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
        result.Raise("Could not add os for instance %s"
                     " on node %s" % (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        # an import failure is only a warning, not fatal: the instance
        # has already been added to the cluster at this point
        msg = import_result.fail_msg
        if msg:
          self.LogWarning("Error while importing the disk images for instance"
                          " %s on node %s: %s" % (instance, pnode_name, msg))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      iobj.admin_up = True
      self.cfg.Update(iobj)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
      result.Raise("Could not start instance")
4991 a8083063 Iustin Pop
4992 a8083063 Iustin Pop
4993 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Compute the command needed to connect to an instance's console.

  This LU is somewhat special: it does not modify cluster state, and
  its Exec result is the ssh command line that must be run on the
  master node in order to attach to the instance's console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and acquire its lock."""
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    The instance must exist in the configuration and its primary node
    must be online.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Compute the console command for the instance.

    Verifies via RPC that the instance is actually running on its
    primary node, then asks the hypervisor abstraction for the shell
    command and wraps it into an ssh invocation.

    """
    inst = self.instance
    pnode = inst.primary_node

    running = self.rpc.call_instance_list([pnode],
                                          [inst.hypervisor])[pnode]
    running.Raise("Can't get node information from %s" % pnode)

    if inst.name not in running.payload:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logging.debug("Connecting to console of %s on %s", inst.name, pnode)

    cluster = self.cfg.GetClusterInfo()
    hyper = hypervisor.GetHypervisor(inst.hypervisor)
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    filled_hv = cluster.FillHV(inst)
    filled_be = cluster.FillBE(inst)
    console_cmd = hyper.GetShellCommandForConsole(inst, filled_hv, filled_be)

    # build ssh cmdline
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
5044 a8083063 Iustin Pop
5045 a8083063 Iustin Pop
5046 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  Depending on self.op.mode this replaces the disks on the primary
  node, on the secondary node, or replaces the secondary node itself
  (the new node being given explicitly via remote_node or chosen by
  an iallocator).

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters; remote_node and iallocator are
  # optional and defaulted to None in CheckArguments
  _OP_REQP = ["instance_name", "mode", "disks"]
  REQ_BGL = False
5054 efd990e4 Guido Trotter
5055 7e9366f7 Iustin Pop
  def CheckArguments(self):
    """Normalize and validate the opcode parameters.

    Defaults the optional remote_node/iallocator attributes to None,
    then enforces that exactly one of them is given when changing the
    secondary node, and that neither is given otherwise.

    """
    for attr in ("remote_node", "iallocator"):
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # check for valid parameter combination
    unset = [self.op.remote_node, self.op.iallocator].count(None)
    if self.op.mode == constants.REPLACE_DISK_CHG:
      if unset == 2:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given")
      if unset == 0:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
    else: # not replacing the secondary
      if unset != 2:
        raise errors.OpPrereqError("The iallocator and new node options can"
                                   " be used only when changing the"
                                   " secondary node")
5076 7e9366f7 Iustin Pop
5077 7e9366f7 Iustin Pop
  def ExpandNames(self):
    """Compute the lock set needed for the disk replacement.

    When an iallocator is used all nodes are locked; when an explicit
    new secondary is given that node is locked and the instance's own
    nodes are appended later; otherwise only the instance's own nodes
    will be locked.

    """
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      return

    if self.op.remote_node is not None:
      expanded = self.cfg.ExpandNodeName(self.op.remote_node)
      if expanded is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = expanded
      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [expanded]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5097 efd990e4 Guido Trotter
5098 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Declare the node-level locks.

    Unless we are already locking the whole node set, the instance's
    own primary/secondary nodes must be added to the declared locks.

    """
    if level != locking.LEVEL_NODE:
      return
    if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
      self._LockInstancesNodes()
5104 a8083063 Iustin Pop
5105 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    On success the chosen node name is stored in self.op.remote_node;
    on any allocator failure an OpPrereqError is raised.

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # BUGFIX: the format string has three placeholders but the
      # original code passed only two arguments, turning this branch
      # into a TypeError instead of the intended OpPrereqError
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)
5127 b6e82a65 Iustin Pop
5128 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run on the master node, on the instance's primary node
    and, when one is given, on the new secondary node.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    # merge in the generic per-instance hook variables (note: these
    # win on any key collision, as in the original ordering)
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    node_list = [self.cfg.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return env, node_list, node_list
5147 a8083063 Iustin Pop
5148 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    It also sets self.instance, self.sec_node, self.remote_node_info
    and the tgt_node/oth_node (and, for secondary replacement,
    new_node) attributes used by the Exec* methods.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # only DRBD8-based instances can have their disks replaced here
    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # if requested, let the iallocator choose the new secondary; this
    # fills in self.op.remote_node
    if self.op.iallocator is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.")

    # per replacement mode, compute the node where new storage will be
    # created (tgt_node/new_node) and the peer node whose disks are
    # checked for consistency (oth_node); n1/n2 are only used for the
    # online checks below
    if self.op.mode == constants.REPLACE_DISK_PRI:
      n1 = self.tgt_node = instance.primary_node
      n2 = self.oth_node = self.sec_node
    elif self.op.mode == constants.REPLACE_DISK_SEC:
      n1 = self.tgt_node = self.sec_node
      n2 = self.oth_node = instance.primary_node
    elif self.op.mode == constants.REPLACE_DISK_CHG:
      n1 = self.new_node = remote_node
      n2 = self.oth_node = instance.primary_node
      self.tgt_node = self.sec_node
      _CheckNodeNotDrained(self, remote_node)
    else:
      raise errors.ProgrammerError("Unhandled disk replace mode")

    _CheckNodeOnline(self, n1)
    _CheckNodeOnline(self, n2)

    # an empty disk list means "replace all disks"
    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    # validate the requested disk indices; FindDisk is expected to
    # reject unknown ones
    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)
5209 a8083063 Iustin Pop
5210 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
5211 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
5212 a9e0c397 Iustin Pop

5213 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5214 e4376078 Iustin Pop

5215 e4376078 Iustin Pop
      1. for each disk to be replaced:
5216 e4376078 Iustin Pop

5217 e4376078 Iustin Pop
        1. create new LVs on the target node with unique names
5218 e4376078 Iustin Pop
        1. detach old LVs from the drbd device
5219 e4376078 Iustin Pop
        1. rename old LVs to name_replaced.<time_t>
5220 e4376078 Iustin Pop
        1. rename new LVs to old LVs
5221 e4376078 Iustin Pop
        1. attach the new LVs (with the old names now) to the drbd device
5222 e4376078 Iustin Pop

5223 e4376078 Iustin Pop
      1. wait for sync across all devices
5224 e4376078 Iustin Pop

5225 e4376078 Iustin Pop
      1. for each modified disk:
5226 e4376078 Iustin Pop

5227 e4376078 Iustin Pop
        1. remove old LVs (which have the name name_replaces.<time_t>)
5228 a9e0c397 Iustin Pop

5229 a9e0c397 Iustin Pop
    Failures are not very well handled.
5230 cff90b79 Iustin Pop

5231 a9e0c397 Iustin Pop
    """
5232 cff90b79 Iustin Pop
    steps_total = 6
5233 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5234 a9e0c397 Iustin Pop
    instance = self.instance
5235 a9e0c397 Iustin Pop
    iv_names = {}
5236 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
5237 a9e0c397 Iustin Pop
    # start of work
5238 a9e0c397 Iustin Pop
    cfg = self.cfg
5239 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
5240 cff90b79 Iustin Pop
    oth_node = self.oth_node
5241 cff90b79 Iustin Pop
5242 cff90b79 Iustin Pop
    # Step: check device activation
5243 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
5244 cff90b79 Iustin Pop
    info("checking volume groups")
5245 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
5246 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([oth_node, tgt_node])
5247 cff90b79 Iustin Pop
    if not results:
5248 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
5249 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
5250 781de953 Iustin Pop
      res = results[node]
5251 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
5252 e480923b Iustin Pop
      if my_vg not in res.payload:
5253 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5254 cff90b79 Iustin Pop
                                 (my_vg, node))
5255 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5256 54155f52 Iustin Pop
      if idx not in self.op.disks:
5257 cff90b79 Iustin Pop
        continue
5258 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
5259 54155f52 Iustin Pop
        info("checking disk/%d on %s" % (idx, node))
5260 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
5261 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
5262 4c4e4e1e Iustin Pop
        msg = result.fail_msg
5263 23829f6f Iustin Pop
        if not msg and not result.payload:
5264 23829f6f Iustin Pop
          msg = "disk not found"
5265 23829f6f Iustin Pop
        if msg:
5266 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5267 23829f6f Iustin Pop
                                   (idx, node, msg))
5268 cff90b79 Iustin Pop
5269 cff90b79 Iustin Pop
    # Step: check other node consistency
5270 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
5271 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5272 54155f52 Iustin Pop
      if idx not in self.op.disks:
5273 cff90b79 Iustin Pop
        continue
5274 54155f52 Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, oth_node))
5275 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, oth_node,
5276 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
5277 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
5278 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
5279 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
5280 cff90b79 Iustin Pop
5281 cff90b79 Iustin Pop
    # Step: create new storage
5282 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
5283 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5284 54155f52 Iustin Pop
      if idx not in self.op.disks:
5285 a9e0c397 Iustin Pop
        continue
5286 a9e0c397 Iustin Pop
      size = dev.size
5287 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
5288 54155f52 Iustin Pop
      lv_names = [".disk%d_%s" % (idx, suf)
5289 54155f52 Iustin Pop
                  for suf in ["data", "meta"]]
5290 b9bddb6b Iustin Pop
      names = _GenerateUniqueNames(self, lv_names)
5291 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5292 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
5293 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5294 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
5295 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
5296 a9e0c397 Iustin Pop
      old_lvs = dev.children
5297 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
5298 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
5299 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
5300 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
5301 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
5302 428958aa Iustin Pop
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
5303 428958aa Iustin Pop
                        _GetInstanceInfoText(instance), False)
5304 a9e0c397 Iustin Pop
5305 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
5306 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
5307 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
5308 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
5309 781de953 Iustin Pop
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
5310 4c4e4e1e Iustin Pop
      result.Raise("Can't detach drbd from local storage on node"
5311 4c4e4e1e Iustin Pop
                   " %s for device %s" % (tgt_node, dev.iv_name))
5312 cff90b79 Iustin Pop
      #dev.children = []
5313 cff90b79 Iustin Pop
      #cfg.Update(instance)
5314 a9e0c397 Iustin Pop
5315 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
5316 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
5317 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
5318 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
5319 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
5320 cff90b79 Iustin Pop
5321 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
5322 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
5323 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
5324 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
5325 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
5326 cff90b79 Iustin Pop
      rlist = []
5327 cff90b79 Iustin Pop
      for to_ren in old_lvs:
5328 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
5329 4c4e4e1e Iustin Pop
        if not result.fail_msg and result.payload:
5330 23829f6f Iustin Pop
          # device exists
5331 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
5332 cff90b79 Iustin Pop
5333 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
5334 781de953 Iustin Pop
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5335 4c4e4e1e Iustin Pop
      result.Raise("Can't rename old LVs on node %s" % tgt_node)
5336 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
5337 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
5338 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
5339 781de953 Iustin Pop
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5340 4c4e4e1e Iustin Pop
      result.Raise("Can't rename new LVs on node %s" % tgt_node)
5341 cff90b79 Iustin Pop
5342 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
5343 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
5344 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
5345 a9e0c397 Iustin Pop
5346 cff90b79 Iustin Pop
      for disk in old_lvs:
5347 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
5348 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
5349 a9e0c397 Iustin Pop
5350 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
5351 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
5352 4504c3d6 Iustin Pop
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
5353 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5354 2cc1da8b Iustin Pop
      if msg:
5355 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
5356 4c4e4e1e Iustin Pop
          msg2 = self.rpc.call_blockdev_remove(tgt_node, new_lv).fail_msg
5357 4c4e4e1e Iustin Pop
          if msg2:
5358 4c4e4e1e Iustin Pop
            warning("Can't rollback device %s: %s", dev, msg2,
5359 e1bc0878 Iustin Pop
                    hint="cleanup manually the unused logical volumes")
5360 2cc1da8b Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
5361 a9e0c397 Iustin Pop
5362 a9e0c397 Iustin Pop
      dev.children = new_lvs
5363 a9e0c397 Iustin Pop
      cfg.Update(instance)
5364 a9e0c397 Iustin Pop
5365 cff90b79 Iustin Pop
    # Step: wait for sync
5366 a9e0c397 Iustin Pop
5367 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
5368 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
5369 a9e0c397 Iustin Pop
    # return value
5370 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
5371 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
5372 a9e0c397 Iustin Pop
5373 a9e0c397 Iustin Pop
    # so check manually all the devices
5374 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5375 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
5376 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
5377 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5378 23829f6f Iustin Pop
      if not msg and not result.payload:
5379 23829f6f Iustin Pop
        msg = "disk not found"
5380 23829f6f Iustin Pop
      if msg:
5381 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
5382 23829f6f Iustin Pop
                                 (name, msg))
5383 23829f6f Iustin Pop
      if result.payload[5]:
5384 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
5385 a9e0c397 Iustin Pop
5386 cff90b79 Iustin Pop
    # Step: remove old storage
5387 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
5388 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5389 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
5390 a9e0c397 Iustin Pop
      for lv in old_lvs:
5391 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
5392 4c4e4e1e Iustin Pop
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).fail_msg
5393 e1bc0878 Iustin Pop
        if msg:
5394 e1bc0878 Iustin Pop
          warning("Can't remove old LV: %s" % msg,
5395 e1bc0878 Iustin Pop
                  hint="manually remove unused LVs")
5396 a9e0c397 Iustin Pop
          continue
5397 a9e0c397 Iustin Pop
5398 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    @param feedback_fn: callback for operation feedback (accepted for
        interface symmetry with Exec; not used directly in this body)
    @raise errors.OpExecError: on any fatal failure during the replacement

    """
    steps_total = 6
    # shortcuts to the per-step logging helpers of the processor
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps disk index -> (disk object, old children, new network-enabled
    # logical_id); filled in step 4 and consumed in steps 5 and 6
    iv_names = {}
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node
    # secondary IPs of the three nodes involved, needed by the
    # drbd_disconnect_net/drbd_attach_net RPCs below
    nodes_ip = {
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
      }

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([pri_node, new_node])
    for node in pri_node, new_node:
      res = results[node]
      res.Raise("Error checking node %s" % node)
      if my_vg not in res.payload:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d on %s" % (idx, pri_node))
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.fail_msg
      # an empty payload with no error also means the device is missing
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                 (idx, pri_node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, pri_node))
      # ldisk=True: check the local disk state, not the network state
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      info("adding new local storage on %s for disk/%d" %
           (new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self, new_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step 4: drbd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
                                   instance.name)
    logging.debug("Allocated minors %s" % (minors,))
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
      # NOTE(review): 'size' appears unused below (dev.size is passed
      # directly to objects.Disk) — kept as-is
      size = dev.size
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the later attach in this same step
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if pri_node == o_node1:
        p_minor = o_minor1
      else:
        p_minor = o_minor2

      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size)
      try:
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
                              _GetInstanceInfoText(instance), False)
      except errors.GenericError:
        # creation failed: give back the reserved minors before bailing out
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    for idx, dev in enumerate(instance.disks):
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for disk/%d on old node" % idx)
      cfg.SetDiskID(dev, old_node)
      # best-effort: a failed shutdown only produces a warning
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).fail_msg
      if msg:
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
                (idx, msg),
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
                                               instance.disks)[pri_node]

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
                                           instance.disks, instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      # attach failures are non-fatal here; sync check below will catch
      # real breakage
      if msg:
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
                hint="please do a gnt-instance info to see the"
                " status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.fail_msg
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
                                 (idx, msg))
      # payload[5] flags a degraded device (see the identical check in
      # the disk-only path above)
      if result.payload[5]:
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      info("remove logical volumes for disk/%d" % idx)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        # best-effort removal: leftover LVs only produce a warning
        msg = self.rpc.call_blockdev_remove(old_node, lv).fail_msg
        if msg:
          warning("Can't remove LV on old secondary: %s", msg,
                  hint="Cleanup stale volumes by hand")
5584 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    Selects the secondary-change or disk-only handler based on the
    requested mode and runs it, making sure the disks of a down
    instance are activated around the call.

    """
    inst = self.instance

    # a down instance has its disks deactivated; bring them up for the
    # duration of the replacement
    if not inst.admin_up:
      _StartInstanceDisks(self, inst, True)

    if self.op.mode == constants.REPLACE_DISK_CHG:
      handler = self._ExecD8Secondary
    else:
      handler = self._ExecD8DiskOnly

    result = handler(feedback_fn)

    # restore the original (down) state of the disks afterwards
    if not inst.admin_up:
      _SafeShutdownInstanceDisks(self, inst)

    return result
5608 a9e0c397 Iustin Pop
5609 a8083063 Iustin Pop
5610 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  Supported only for the plain (LVM) and drbd8 disk templates; the
  grow RPC is sent to every node holding the disk (the primary plus,
  for drbd8, the secondary).

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode fields
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and declare the locks needed (instance + its nodes)."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    # node locks are computed later from the instance's node list
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the nodes of the (already locked) instance."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    template supports growing, and that each involved node is online
    and has enough free space in the volume group.

    @raise errors.OpPrereqError: if any of the above checks fails

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)


    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)

    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo[node]
      info.Raise("Cannot get current information from node %s" % node)
      # free space in MiB, as reported by the node (may be missing)
      vg_free = info.payload.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    Grows the device on every node holding it, records the new size in
    the configuration and optionally waits for the disks to resync.

    """
    instance = self.instance
    disk = self.disk
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      result.Raise("Grow request failed to node %s" % node)
    # all nodes grew the device; persist the new size
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      # a failed sync only warns; the grow itself already succeeded
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
5698 8729e0d7 Iustin Pop
5699 8729e0d7 Iustin Pop
5700 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  Returns, per instance, a dictionary with configuration data plus
  (unless the 'static' flag is set) live data gathered via RPC from
  the instance's nodes.

  """
  # required opcode fields: the instance list (may be empty = all) and
  # the static-only flag
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand instance names and declare (shared) locks."""
    self.needed_locks = {}
    # this is a read-only query, so all locks are taken shared
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # empty list means "all instances"; resolved in CheckPrereq
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the nodes of the selected instances."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" case: use whatever the lock acquisition found
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Recursively builds a dict describing the device and its children,
    including live primary/secondary status (via blockdev_find RPCs)
    unless the query is static or the node is offline.

    @param instance: the owning instance object
    @param snode: the secondary node name to query (overridden for DRBD
        devices from the device's own logical_id); may be None
    @param dev: the disk object to describe
    @return: a dict with the device's name, type, ids, status and children

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
      # an offline node yields no status rather than an error
      if dev_pstatus.offline:
        dev_pstatus = None
      else:
        dev_pstatus.Raise("Can't compute disk status for %s" % instance.name)
        dev_pstatus = dev_pstatus.payload
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
      if dev_sstatus.offline:
        dev_sstatus = None
      else:
        dev_sstatus.Raise("Can't compute disk status for %s" % instance.name)
        dev_sstatus = dev_sstatus.payload
    else:
      dev_sstatus = None

    if dev.children:
      # recurse into child devices (e.g. the LVs backing a DRBD device)
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data.

    @return: a dict mapping instance name to a dict of configuration
        and (optionally) runtime data

    """
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        # a non-empty payload with a "state" key means the instance is
        # running on its primary node
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": _NICListToTuple(self, instance.nics),
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        # hv/be parameters: both the instance-level overrides and the
        # fully filled (cluster defaults applied) versions
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
5847 a8083063 Iustin Pop
5848 a8083063 Iustin Pop
5849 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
5850 a8083063 Iustin Pop
  """Modifies an instances's parameters.
5851 a8083063 Iustin Pop

5852 a8083063 Iustin Pop
  """
5853 a8083063 Iustin Pop
  HPATH = "instance-modify"
5854 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5855 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
5856 1a5c7281 Guido Trotter
  REQ_BGL = False
5857 1a5c7281 Guido Trotter
5858 24991749 Iustin Pop
  def CheckArguments(self):
5859 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
5860 24991749 Iustin Pop
      self.op.nics = []
5861 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
5862 24991749 Iustin Pop
      self.op.disks = []
5863 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
5864 24991749 Iustin Pop
      self.op.beparams = {}
5865 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
5866 24991749 Iustin Pop
      self.op.hvparams = {}
5867 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
5868 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
5869 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
5870 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
5871 24991749 Iustin Pop
5872 24991749 Iustin Pop
    # Disk validation
5873 24991749 Iustin Pop
    disk_addremove = 0
5874 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5875 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5876 24991749 Iustin Pop
        disk_addremove += 1
5877 24991749 Iustin Pop
        continue
5878 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
5879 24991749 Iustin Pop
        disk_addremove += 1
5880 24991749 Iustin Pop
      else:
5881 24991749 Iustin Pop
        if not isinstance(disk_op, int):
5882 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
5883 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
5884 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
5885 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
5886 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
5887 24991749 Iustin Pop
        size = disk_dict.get('size', None)
5888 24991749 Iustin Pop
        if size is None:
5889 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
5890 24991749 Iustin Pop
        try:
5891 24991749 Iustin Pop
          size = int(size)
5892 24991749 Iustin Pop
        except ValueError, err:
5893 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
5894 24991749 Iustin Pop
                                     str(err))
5895 24991749 Iustin Pop
        disk_dict['size'] = size
5896 24991749 Iustin Pop
      else:
5897 24991749 Iustin Pop
        # modification of disk
5898 24991749 Iustin Pop
        if 'size' in disk_dict:
5899 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
5900 24991749 Iustin Pop
                                     " grow-disk")
5901 24991749 Iustin Pop
5902 24991749 Iustin Pop
    if disk_addremove > 1:
5903 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
5904 24991749 Iustin Pop
                                 " supported at a time")
5905 24991749 Iustin Pop
5906 24991749 Iustin Pop
    # NIC validation
5907 24991749 Iustin Pop
    nic_addremove = 0
5908 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5909 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5910 24991749 Iustin Pop
        nic_addremove += 1
5911 24991749 Iustin Pop
        continue
5912 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
5913 24991749 Iustin Pop
        nic_addremove += 1
5914 24991749 Iustin Pop
      else:
5915 24991749 Iustin Pop
        if not isinstance(nic_op, int):
5916 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
5917 24991749 Iustin Pop
5918 24991749 Iustin Pop
      # nic_dict should be a dict
5919 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
5920 24991749 Iustin Pop
      if nic_ip is not None:
5921 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
5922 24991749 Iustin Pop
          nic_dict['ip'] = None
5923 24991749 Iustin Pop
        else:
5924 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
5925 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
5926 5c44da6a Guido Trotter
5927 cd098c41 Guido Trotter
      nic_bridge = nic_dict.get('bridge', None)
5928 cd098c41 Guido Trotter
      nic_link = nic_dict.get('link', None)
5929 cd098c41 Guido Trotter
      if nic_bridge and nic_link:
5930 cd098c41 Guido Trotter
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link' at the same time")
5931 cd098c41 Guido Trotter
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
5932 cd098c41 Guido Trotter
        nic_dict['bridge'] = None
5933 cd098c41 Guido Trotter
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
5934 cd098c41 Guido Trotter
        nic_dict['link'] = None
5935 cd098c41 Guido Trotter
5936 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
5937 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
5938 5c44da6a Guido Trotter
        if nic_mac is None:
5939 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
5940 5c44da6a Guido Trotter
5941 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
5942 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
5943 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5944 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
5945 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
5946 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
5947 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
5948 5c44da6a Guido Trotter
                                     " modifying an existing nic")
5949 5c44da6a Guido Trotter
5950 24991749 Iustin Pop
    if nic_addremove > 1:
5951 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
5952 24991749 Iustin Pop
                                 " supported at a time")
5953 24991749 Iustin Pop
5954 1a5c7281 Guido Trotter
  def ExpandNames(self):
5955 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
5956 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
5957 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5958 74409b12 Iustin Pop
5959 74409b12 Iustin Pop
  def DeclareLocks(self, level):
5960 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
5961 74409b12 Iustin Pop
      self._LockInstancesNodes()
5962 a8083063 Iustin Pop
5963 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5964 a8083063 Iustin Pop
    """Build hooks env.
5965 a8083063 Iustin Pop

5966 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
5967 a8083063 Iustin Pop

5968 a8083063 Iustin Pop
    """
5969 396e1b78 Michael Hanselmann
    args = dict()
5970 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
5971 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
5972 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
5973 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
5974 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
5975 d8dcf3c9 Guido Trotter
    # information at all.
5976 d8dcf3c9 Guido Trotter
    if self.op.nics:
5977 d8dcf3c9 Guido Trotter
      args['nics'] = []
5978 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
5979 62f0dd02 Guido Trotter
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
5980 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
5981 d8dcf3c9 Guido Trotter
        if idx in nic_override:
5982 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
5983 d8dcf3c9 Guido Trotter
        else:
5984 d8dcf3c9 Guido Trotter
          this_nic_override = {}
5985 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
5986 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
5987 d8dcf3c9 Guido Trotter
        else:
5988 d8dcf3c9 Guido Trotter
          ip = nic.ip
5989 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
5990 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
5991 d8dcf3c9 Guido Trotter
        else:
5992 d8dcf3c9 Guido Trotter
          mac = nic.mac
5993 62f0dd02 Guido Trotter
        if idx in self.nic_pnew:
5994 62f0dd02 Guido Trotter
          nicparams = self.nic_pnew[idx]
5995 62f0dd02 Guido Trotter
        else:
5996 62f0dd02 Guido Trotter
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
5997 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
5998 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
5999 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
6000 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
6001 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
6002 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
6003 62f0dd02 Guido Trotter
        nicparams = self.nic_pnew[constants.DDM_ADD]
6004 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
6005 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
6006 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
6007 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
6008 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
6009 d8dcf3c9 Guido Trotter
6010 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
6011 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6012 a8083063 Iustin Pop
    return env, nl, nl
6013 a8083063 Iustin Pop
6014 0329617a Guido Trotter
  def _GetUpdatedParams(self, old_params, update_dict,
6015 0329617a Guido Trotter
                        default_values, parameter_types):
6016 0329617a Guido Trotter
    """Return the new params dict for the given params.
6017 0329617a Guido Trotter

6018 0329617a Guido Trotter
    @type old_params: dict
6019 0329617a Guido Trotter
    @type old_params: old parameters
6020 0329617a Guido Trotter
    @type update_dict: dict
6021 0329617a Guido Trotter
    @type update_dict: dict containing new parameter values,
6022 0329617a Guido Trotter
                       or constants.VALUE_DEFAULT to reset the
6023 0329617a Guido Trotter
                       parameter to its default value
6024 0329617a Guido Trotter
    @type default_values: dict
6025 0329617a Guido Trotter
    @param default_values: default values for the filled parameters
6026 0329617a Guido Trotter
    @type parameter_types: dict
6027 0329617a Guido Trotter
    @param parameter_types: dict mapping target dict keys to types
6028 0329617a Guido Trotter
                            in constants.ENFORCEABLE_TYPES
6029 0329617a Guido Trotter
    @rtype: (dict, dict)
6030 0329617a Guido Trotter
    @return: (new_parameters, filled_parameters)
6031 0329617a Guido Trotter

6032 0329617a Guido Trotter
    """
6033 0329617a Guido Trotter
    params_copy = copy.deepcopy(old_params)
6034 0329617a Guido Trotter
    for key, val in update_dict.iteritems():
6035 0329617a Guido Trotter
      if val == constants.VALUE_DEFAULT:
6036 0329617a Guido Trotter
        try:
6037 0329617a Guido Trotter
          del params_copy[key]
6038 0329617a Guido Trotter
        except KeyError:
6039 0329617a Guido Trotter
          pass
6040 0329617a Guido Trotter
      else:
6041 0329617a Guido Trotter
        params_copy[key] = val
6042 0329617a Guido Trotter
    utils.ForceDictType(params_copy, parameter_types)
6043 0329617a Guido Trotter
    params_filled = objects.FillDict(default_values, params_copy)
6044 0329617a Guido Trotter
    return (params_copy, params_filled)
6045 0329617a Guido Trotter
6046 a8083063 Iustin Pop
  def CheckPrereq(self):
6047 a8083063 Iustin Pop
    """Check prerequisites.
6048 a8083063 Iustin Pop

6049 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
6050 a8083063 Iustin Pop

6051 a8083063 Iustin Pop
    """
6052 24991749 Iustin Pop
    force = self.force = self.op.force
6053 a8083063 Iustin Pop
6054 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
6055 31a853d2 Iustin Pop
6056 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6057 2ee88aeb Guido Trotter
    cluster = self.cluster = self.cfg.GetClusterInfo()
6058 1a5c7281 Guido Trotter
    assert self.instance is not None, \
6059 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6060 6b12959c Iustin Pop
    pnode = instance.primary_node
6061 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
6062 74409b12 Iustin Pop
6063 338e51e8 Iustin Pop
    # hvparams processing
6064 74409b12 Iustin Pop
    if self.op.hvparams:
6065 0329617a Guido Trotter
      i_hvdict, hv_new = self._GetUpdatedParams(
6066 0329617a Guido Trotter
                             instance.hvparams, self.op.hvparams,
6067 0329617a Guido Trotter
                             cluster.hvparams[instance.hypervisor],
6068 0329617a Guido Trotter
                             constants.HVS_PARAMETER_TYPES)
6069 74409b12 Iustin Pop
      # local check
6070 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
6071 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
6072 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
6073 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
6074 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
6075 338e51e8 Iustin Pop
    else:
6076 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
6077 338e51e8 Iustin Pop
6078 338e51e8 Iustin Pop
    # beparams processing
6079 338e51e8 Iustin Pop
    if self.op.beparams:
6080 0329617a Guido Trotter
      i_bedict, be_new = self._GetUpdatedParams(
6081 0329617a Guido Trotter
                             instance.beparams, self.op.beparams,
6082 0329617a Guido Trotter
                             cluster.beparams[constants.PP_DEFAULT],
6083 0329617a Guido Trotter
                             constants.BES_PARAMETER_TYPES)
6084 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
6085 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
6086 338e51e8 Iustin Pop
    else:
6087 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
6088 74409b12 Iustin Pop
6089 cfefe007 Guido Trotter
    self.warn = []
6090 647a5d80 Iustin Pop
6091 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
6092 647a5d80 Iustin Pop
      mem_check_list = [pnode]
6093 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6094 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
6095 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
6096 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
6097 72737a7f Iustin Pop
                                                  instance.hypervisor)
6098 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
6099 72737a7f Iustin Pop
                                         instance.hypervisor)
6100 070e998b Iustin Pop
      pninfo = nodeinfo[pnode]
6101 4c4e4e1e Iustin Pop
      msg = pninfo.fail_msg
6102 070e998b Iustin Pop
      if msg:
6103 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
6104 070e998b Iustin Pop
        self.warn.append("Can't get info from primary node %s: %s" %
6105 070e998b Iustin Pop
                         (pnode,  msg))
6106 070e998b Iustin Pop
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
6107 070e998b Iustin Pop
        self.warn.append("Node data from primary node %s doesn't contain"
6108 070e998b Iustin Pop
                         " free memory information" % pnode)
6109 4c4e4e1e Iustin Pop
      elif instance_info.fail_msg:
6110 7ad1af4a Iustin Pop
        self.warn.append("Can't get instance runtime information: %s" %
6111 4c4e4e1e Iustin Pop
                        instance_info.fail_msg)
6112 cfefe007 Guido Trotter
      else:
6113 7ad1af4a Iustin Pop
        if instance_info.payload:
6114 7ad1af4a Iustin Pop
          current_mem = int(instance_info.payload['memory'])
6115 cfefe007 Guido Trotter
        else:
6116 cfefe007 Guido Trotter
          # Assume instance not running
6117 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
6118 cfefe007 Guido Trotter
          # and we have no other way to check)
6119 cfefe007 Guido Trotter
          current_mem = 0
6120 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
6121 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
6122 cfefe007 Guido Trotter
        if miss_mem > 0:
6123 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
6124 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
6125 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
6126 cfefe007 Guido Trotter
6127 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6128 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
6129 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
6130 ea33068f Iustin Pop
            continue
6131 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
6132 070e998b Iustin Pop
          if msg:
6133 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
6134 070e998b Iustin Pop
                             (node, msg))
6135 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
6136 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
6137 070e998b Iustin Pop
                             " memory information" % node)
6138 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
6139 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
6140 647a5d80 Iustin Pop
                             " secondary node %s" % node)
6141 5bc84f33 Alexander Schreiber
6142 24991749 Iustin Pop
    # NIC processing
6143 cd098c41 Guido Trotter
    self.nic_pnew = {}
6144 cd098c41 Guido Trotter
    self.nic_pinst = {}
6145 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6146 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6147 24991749 Iustin Pop
        if not instance.nics:
6148 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
6149 24991749 Iustin Pop
        continue
6150 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
6151 24991749 Iustin Pop
        # an existing nic
6152 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
6153 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
6154 24991749 Iustin Pop
                                     " are 0 to %d" %
6155 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
6156 cd098c41 Guido Trotter
        old_nic_params = instance.nics[nic_op].nicparams
6157 cd098c41 Guido Trotter
        old_nic_ip = instance.nics[nic_op].ip
6158 cd098c41 Guido Trotter
      else:
6159 cd098c41 Guido Trotter
        old_nic_params = {}
6160 cd098c41 Guido Trotter
        old_nic_ip = None
6161 cd098c41 Guido Trotter
6162 cd098c41 Guido Trotter
      update_params_dict = dict([(key, nic_dict[key])
6163 cd098c41 Guido Trotter
                                 for key in constants.NICS_PARAMETERS
6164 cd098c41 Guido Trotter
                                 if key in nic_dict])
6165 cd098c41 Guido Trotter
6166 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
6167 cd098c41 Guido Trotter
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
6168 cd098c41 Guido Trotter
6169 cd098c41 Guido Trotter
      new_nic_params, new_filled_nic_params = \
6170 cd098c41 Guido Trotter
          self._GetUpdatedParams(old_nic_params, update_params_dict,
6171 cd098c41 Guido Trotter
                                 cluster.nicparams[constants.PP_DEFAULT],
6172 cd098c41 Guido Trotter
                                 constants.NICS_PARAMETER_TYPES)
6173 cd098c41 Guido Trotter
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
6174 cd098c41 Guido Trotter
      self.nic_pinst[nic_op] = new_nic_params
6175 cd098c41 Guido Trotter
      self.nic_pnew[nic_op] = new_filled_nic_params
6176 cd098c41 Guido Trotter
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
6177 cd098c41 Guido Trotter
6178 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
6179 cd098c41 Guido Trotter
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
6180 4c4e4e1e Iustin Pop
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
6181 35c0c8da Iustin Pop
        if msg:
6182 35c0c8da Iustin Pop
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
6183 24991749 Iustin Pop
          if self.force:
6184 24991749 Iustin Pop
            self.warn.append(msg)
6185 24991749 Iustin Pop
          else:
6186 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
6187 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_ROUTED:
6188 cd098c41 Guido Trotter
        if 'ip' in nic_dict:
6189 cd098c41 Guido Trotter
          nic_ip = nic_dict['ip']
6190 cd098c41 Guido Trotter
        else:
6191 cd098c41 Guido Trotter
          nic_ip = old_nic_ip
6192 cd098c41 Guido Trotter
        if nic_ip is None:
6193 cd098c41 Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic ip to None'
6194 cd098c41 Guido Trotter
                                     ' on a routed nic')
6195 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6196 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6197 5c44da6a Guido Trotter
        if nic_mac is None:
6198 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic mac to None')
6199 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6200 5c44da6a Guido Trotter
          # otherwise generate the mac
6201 5c44da6a Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC()
6202 5c44da6a Guido Trotter
        else:
6203 5c44da6a Guido Trotter
          # or validate/reserve the current one
6204 5c44da6a Guido Trotter
          if self.cfg.IsMacInUse(nic_mac):
6205 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
6206 5c44da6a Guido Trotter
                                       " in cluster" % nic_mac)
6207 24991749 Iustin Pop
6208 24991749 Iustin Pop
    # DISK processing
6209 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
6210 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
6211 24991749 Iustin Pop
                                 " diskless instances")
6212 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6213 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6214 24991749 Iustin Pop
        if len(instance.disks) == 1:
6215 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
6216 24991749 Iustin Pop
                                     " an instance")
6217 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
6218 24991749 Iustin Pop
        ins_l = ins_l[pnode]
6219 4c4e4e1e Iustin Pop
        msg = ins_l.fail_msg
6220 aca13712 Iustin Pop
        if msg:
6221 aca13712 Iustin Pop
          raise errors.OpPrereqError("Can't contact node %s: %s" %
6222 aca13712 Iustin Pop
                                     (pnode, msg))
6223 aca13712 Iustin Pop
        if instance.name in ins_l.payload:
6224 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
6225 24991749 Iustin Pop
                                     " disks.")
6226 24991749 Iustin Pop
6227 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
6228 24991749 Iustin Pop
          len(instance.nics) >= constants.MAX_DISKS):
6229 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
6230 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
6231 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
6232 24991749 Iustin Pop
        # an existing disk
6233 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
6234 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
6235 24991749 Iustin Pop
                                     " are 0 to %d" %
6236 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
6237 24991749 Iustin Pop
6238 a8083063 Iustin Pop
    return
6239 a8083063 Iustin Pop
6240 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6241 a8083063 Iustin Pop
    """Modifies an instance.
6242 a8083063 Iustin Pop

6243 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
6244 24991749 Iustin Pop

6245 a8083063 Iustin Pop
    """
6246 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
6247 cfefe007 Guido Trotter
    # feedback_fn there.
6248 cfefe007 Guido Trotter
    for warn in self.warn:
6249 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
6250 cfefe007 Guido Trotter
6251 a8083063 Iustin Pop
    result = []
6252 a8083063 Iustin Pop
    instance = self.instance
6253 cd098c41 Guido Trotter
    cluster = self.cluster
6254 24991749 Iustin Pop
    # disk changes
6255 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6256 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6257 24991749 Iustin Pop
        # remove the last disk
6258 24991749 Iustin Pop
        device = instance.disks.pop()
6259 24991749 Iustin Pop
        device_idx = len(instance.disks)
6260 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
6261 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
6262 4c4e4e1e Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
6263 e1bc0878 Iustin Pop
          if msg:
6264 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
6265 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
6266 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
6267 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
6268 24991749 Iustin Pop
        # add a new disk
6269 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
6270 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
6271 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
6272 24991749 Iustin Pop
        else:
6273 24991749 Iustin Pop
          file_driver = file_path = None
6274 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
6275 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
6276 24991749 Iustin Pop
                                         instance.disk_template,
6277 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
6278 24991749 Iustin Pop
                                         instance.secondary_nodes,
6279 24991749 Iustin Pop
                                         [disk_dict],
6280 24991749 Iustin Pop
                                         file_path,
6281 24991749 Iustin Pop
                                         file_driver,
6282 24991749 Iustin Pop
                                         disk_idx_base)[0]
6283 24991749 Iustin Pop
        instance.disks.append(new_disk)
6284 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
6285 24991749 Iustin Pop
6286 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
6287 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
6288 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
6289 24991749 Iustin Pop
        #HARDCODE
6290 428958aa Iustin Pop
        for node in instance.all_nodes:
6291 428958aa Iustin Pop
          f_create = node == instance.primary_node
6292 796cab27 Iustin Pop
          try:
6293 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
6294 428958aa Iustin Pop
                            f_create, info, f_create)
6295 1492cca7 Iustin Pop
          except errors.OpExecError, err:
6296 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
6297 428958aa Iustin Pop
                            " node %s: %s",
6298 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
6299 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
6300 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
6301 24991749 Iustin Pop
      else:
6302 24991749 Iustin Pop
        # change a given disk
6303 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
6304 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
6305 24991749 Iustin Pop
    # NIC changes
6306 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6307 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6308 24991749 Iustin Pop
        # remove the last nic
6309 24991749 Iustin Pop
        del instance.nics[-1]
6310 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
6311 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
6312 5c44da6a Guido Trotter
        # mac and bridge should be set, by now
6313 5c44da6a Guido Trotter
        mac = nic_dict['mac']
6314 cd098c41 Guido Trotter
        ip = nic_dict.get('ip', None)
6315 cd098c41 Guido Trotter
        nicparams = self.nic_pinst[constants.DDM_ADD]
6316 cd098c41 Guido Trotter
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
6317 24991749 Iustin Pop
        instance.nics.append(new_nic)
6318 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
6319 cd098c41 Guido Trotter
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
6320 cd098c41 Guido Trotter
                       (new_nic.mac, new_nic.ip,
6321 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
6322 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
6323 cd098c41 Guido Trotter
                       )))
6324 24991749 Iustin Pop
      else:
6325 cd098c41 Guido Trotter
        for key in 'mac', 'ip':
6326 24991749 Iustin Pop
          if key in nic_dict:
6327 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
6328 cd098c41 Guido Trotter
        if nic_op in self.nic_pnew:
6329 cd098c41 Guido Trotter
          instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
6330 cd098c41 Guido Trotter
        for key, val in nic_dict.iteritems():
6331 cd098c41 Guido Trotter
          result.append(("nic.%s/%d" % (key, nic_op), val))
6332 24991749 Iustin Pop
6333 24991749 Iustin Pop
    # hvparams changes
6334 74409b12 Iustin Pop
    if self.op.hvparams:
6335 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
6336 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
6337 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
6338 24991749 Iustin Pop
6339 24991749 Iustin Pop
    # beparams changes
6340 338e51e8 Iustin Pop
    if self.op.beparams:
6341 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
6342 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
6343 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
6344 a8083063 Iustin Pop
6345 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
6346 a8083063 Iustin Pop
6347 a8083063 Iustin Pop
    return result
6348 a8083063 Iustin Pop
6349 a8083063 Iustin Pop
6350 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    # read-only operation: acquire the node locks in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      # no explicit node list: query every node in the cluster
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the node list is exactly what we managed to lock
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node; a node whose RPC call failed maps to False

    """
    rpcresult = self.rpc.call_export_list(self.nodes)
    exports = {}
    for node in rpcresult:
      if rpcresult[node].fail_msg:
        # could not query this node; flag it instead of aborting
        exports[node] = False
      else:
        exports[node] = rpcresult[node].payload

    return exports
6390 a8083063 Iustin Pop
6391 a8083063 Iustin Pop
6392 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know
    # where the previous export might be, and in this LU we search for it
    # and remove it from its current node. In the future we could fix this
    # by:
    #  - making a tasklet to search (share-lock all), then create the new
    #    one, then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)
    _CheckNodeNotDrained(self, self.dst_node.name)

    # instance disk type verification: file-based disks cannot be exported
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node

    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      result.Raise("Could not shutdown instance %s on"
                   " node %s" % (instance.name, src_node))

    vgname = self.cfg.GetVGName()
    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    try:
      for idx, disk in enumerate(instance.disks):
        # result.payload will be a snapshot of an lvm leaf of the one we passed
        result = self.rpc.call_blockdev_snapshot(src_node, disk)
        msg = result.fail_msg
        if not msg:
          disk_id = (vgname, result.payload)
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=disk_id, physical_id=disk_id,
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)
        else:
          # keep the (False) placeholder so disk indices stay aligned
          self.LogWarning("Could not snapshot disk/%s on node %s: %s",
                          idx, src_node, msg)
          snap_disks.append(False)
    finally:
      # restart the instance if it was running and we shut it down above
      if self.op.shutdown and instance.admin_up:
        result = self.rpc.call_instance_start(src_node, instance, None, None)
        msg = result.fail_msg
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if not dev:
        # snapshot of this disk failed earlier; nothing to transfer
        continue
      result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                             instance, cluster_name, idx)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not export disk/%s from node %s to"
                        " node %s: %s", idx, src_node, dst_node.name, msg)
      # the snapshot is removed regardless of the export's outcome
      msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
      if msg:
        self.LogWarning("Could not remove snapshot for disk/%d from node"
                        " %s: %s", idx, src_node, msg)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Could not finalize export for instance %s"
                      " on node %s: %s", instance.name, dst_node.name, msg)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    iname = instance.name
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].fail_msg:
          continue
        if iname in exportlist[node].payload:
          msg = self.rpc.call_export_remove(node, iname).fail_msg
          if msg:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s: %s", iname, node, msg)
6544 5c947f38 Iustin Pop
6545 5c947f38 Iustin Pop
6546 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      msg = exportlist[node].fail_msg
      if msg:
        # best effort: log and keep trying the remaining nodes
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
        continue
      if instance_name in exportlist[node].payload:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        msg = result.fail_msg
        if msg:
          logging.error("Could not remove export for instance %s"
                        " on node %s: %s", instance_name, node, msg)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
6598 9ac99fda Guido Trotter
6599 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    # only node and instance tags need a lock; cluster tags need none
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif self.op.kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # resolve the tag target object from the (kind, name) pair
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
6636 5c947f38 Iustin Pop
6637 5c947f38 Iustin Pop
6638 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq
    return list(self.target.GetTags())
6650 5c947f38 Iustin Pop
6651 5c947f38 Iustin Pop
6652 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
6653 73415719 Iustin Pop
  """Searches the tags for a given pattern.
6654 73415719 Iustin Pop

6655 73415719 Iustin Pop
  """
6656 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
6657 8646adce Guido Trotter
  REQ_BGL = False
6658 8646adce Guido Trotter
6659 8646adce Guido Trotter
  def ExpandNames(self):
6660 8646adce Guido Trotter
    self.needed_locks = {}
6661 73415719 Iustin Pop
6662 73415719 Iustin Pop
  def CheckPrereq(self):
6663 73415719 Iustin Pop
    """Check prerequisites.
6664 73415719 Iustin Pop

6665 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
6666 73415719 Iustin Pop

6667 73415719 Iustin Pop
    """
6668 73415719 Iustin Pop
    try:
6669 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
6670 73415719 Iustin Pop
    except re.error, err:
6671 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6672 73415719 Iustin Pop
                                 (self.op.pattern, err))
6673 73415719 Iustin Pop
6674 73415719 Iustin Pop
  def Exec(self, feedback_fn):
6675 73415719 Iustin Pop
    """Returns the tag list.
6676 73415719 Iustin Pop

6677 73415719 Iustin Pop
    """
6678 73415719 Iustin Pop
    cfg = self.cfg
6679 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
6680 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
6681 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6682 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
6683 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6684 73415719 Iustin Pop
    results = []
6685 73415719 Iustin Pop
    for path, target in tgts:
6686 73415719 Iustin Pop
      for tag in target.GetTags():
6687 73415719 Iustin Pop
        if self.re.search(tag):
6688 73415719 Iustin Pop
          results.append((path, tag))
6689 73415719 Iustin Pop
    return results
6690 73415719 Iustin Pop
6691 73415719 Iustin Pop
6692 f27302fa Iustin Pop
class LUAddTags(TagsLU):
6693 5c947f38 Iustin Pop
  """Sets a tag on a given object.
6694 5c947f38 Iustin Pop

6695 5c947f38 Iustin Pop
  """
6696 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6697 8646adce Guido Trotter
  REQ_BGL = False
6698 5c947f38 Iustin Pop
6699 5c947f38 Iustin Pop
  def CheckPrereq(self):
6700 5c947f38 Iustin Pop
    """Check prerequisites.
6701 5c947f38 Iustin Pop

6702 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
6703 5c947f38 Iustin Pop

6704 5c947f38 Iustin Pop
    """
6705 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6706 f27302fa Iustin Pop
    for tag in self.op.tags:
6707 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6708 5c947f38 Iustin Pop
6709 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6710 5c947f38 Iustin Pop
    """Sets the tag.
6711 5c947f38 Iustin Pop

6712 5c947f38 Iustin Pop
    """
6713 5c947f38 Iustin Pop
    try:
6714 f27302fa Iustin Pop
      for tag in self.op.tags:
6715 f27302fa Iustin Pop
        self.target.AddTag(tag)
6716 5c947f38 Iustin Pop
    except errors.TagError, err:
6717 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6718 5c947f38 Iustin Pop
    try:
6719 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6720 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6721 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6722 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6723 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6724 5c947f38 Iustin Pop
6725 5c947f38 Iustin Pop
6726 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      # report the tags which are missing, sorted and quoted
      diff_tags = del_tags - cur_tags
      diff_names = sorted(["'%s'" % tag for tag in diff_tags])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    # persist the modified target; a concurrent config change aborts us
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
6763 06009e27 Iustin Pop
6764 0eed6e61 Guido Trotter
6765 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      # fan the delay out to the requested nodes and check every answer
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      for node, node_result in result.items():
        node_result.Raise("Failure during rpc call to node %s" % node)
6805 d61df03e Iustin Pop
6806 d61df03e Iustin Pop
6807 d1c2dd75 Iustin Pop
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # Input keys required when mode is IALLOCATOR_MODE_ALLOC
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  # Input keys required when mode is IALLOCATOR_MODE_RELOC
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, lu, mode, name, **kwargs):
    """Initialize the allocator request.

    @param lu: the LogicalUnit on whose behalf we run (its cfg and rpc
        attributes are used to query the cluster)
    @param mode: constants.IALLOCATOR_MODE_ALLOC or IALLOCATOR_MODE_RELOC
    @param name: the instance name the request is about
    @param kwargs: exactly the keys from _ALLO_KEYS (alloc mode) or
        _RELO_KEYS (reloc mode); extra or missing keys raise
        ProgrammerError

    """
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # reject unexpected keys, then store the accepted ones as attributes
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    # all keys of the selected keyset are mandatory
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    # pair each instance with its filled-in backend parameters
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    # mode is validated in __init__, so exactly one branch is taken here
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor_name)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      # dynamic (live) data is only gathered for online nodes; offline
      # nodes keep just the static entries above
      if not ninfo.offline:
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload
        # sanity-check the live data before using it
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        # NOTE: this inner loop variable shadows the 'iinfo' values list
        # computed above; the list form is not used afterwards
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            # account for memory an instance should use but currently
            # does not (e.g. stopped instances): treat it as reserved
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        # merge the cluster-level NIC defaults with the per-NIC overrides
        filled_params = objects.FillDict(
            cluster_info.nicparams[constants.PP_DEFAULT],
            nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          # in bridged mode the link doubles as the bridge name
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _AllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    # mirrored templates need a secondary node as well
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _IAllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # relocation only makes sense for instances with a secondary node
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    Fills in_data with the cluster-wide data plus the mode-specific
    request, then serializes it into in_text.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    @param name: the name of the allocator script to run
    @param validate: whether to parse and validate the script's output
        (stored in out_data) or keep it as raw text only
    @param call_fn: alternative RPC callable, mainly for testing; defaults
        to the iallocator runner on the master node

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner
    data = self.in_text

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # the protocol requires these three keys in every reply
    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
7106 538475ca Iustin Pop
7107 538475ca Iustin Pop
7108 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  # required opcode parameters; mode-specific ones are checked in
  # CheckPrereq
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the director and mode test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocation mode: all instance-creation attributes must be present
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      # the (test) instance must not exist yet
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      # default the hypervisor to the cluster-wide one if not given
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      # relocation mode: the instance must already exist
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      # relocate away from the current secondary node(s)
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    # an allocator script name is only needed when we actually run one
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    Builds the IAllocator request and either returns the generated input
    text (direction "in") or runs the external allocator script and
    returns its raw output (direction "out").

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      # validate=False: this is a test LU, return the raw script output
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result