Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 3213d3c8

History | View | Annotate | Download (239.2 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 ffa1c0dc Iustin Pop
import logging
34 74409b12 Iustin Pop
import copy
35 4b7735f9 Iustin Pop
import random
36 a8083063 Iustin Pop
37 a8083063 Iustin Pop
from ganeti import ssh
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 6048c986 Guido Trotter
from ganeti import locking
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 a8083063 Iustin Pop
from ganeti import opcodes
45 8d14b30d Iustin Pop
from ganeti import serializer
46 112f18a5 Iustin Pop
from ganeti import ssconf
47 d61df03e Iustin Pop
48 d61df03e Iustin Pop
49 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    @param processor: the mcpu processor driving this LU (also provides
        the LogWarning/LogInfo callbacks bound below)
    @param op: the opcode this LU executes
    @param context: the execution context, providing the cluster config
    @param rpc: the RPC runner used to talk to nodes
    @raise errors.OpPrereqError: if any parameter listed in _OP_REQP is
        missing (or None) on the opcode

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # by default all locks are acquired exclusively (value 0 per level)
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # lazily-created SshRunner, see __GetSSH below
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    # validate that all required opcode parameters are present before any
    # further (possibly expensive) processing happens
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object, creating it on first use.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  # read-only property so LUs can simply use "self.ssh"
  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separate is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possible
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    @param feedback_fn: callable used to report progress back to the caller

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    @raise errors.OpPrereqError: if the instance name cannot be expanded

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      # guard against being called after instance-level locks were declared
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    # LOCKS_REPLACE overwrites the node lock list, LOCKS_APPEND extends it
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # consume the flag so a stale value cannot affect a later level
    del self.recalculate_locks[locking.LEVEL_NODE]
327 c4a2fee1 Guido Trotter
328 a8083063 Iustin Pop
329 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  # HPATH of None makes the processor skip BuildHooksEnv/hook execution
  HPATH = None
  HTYPE = None
338 a8083063 Iustin Pop
339 a8083063 Iustin Pop
340 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpProgrammerError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  def _ExpandOne(name):
    # canonicalize a single node name, failing hard on unknown nodes
    expanded = lu.cfg.ExpandNodeName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    return expanded

  return utils.NiceSort([_ExpandOne(name) for name in nodes])
367 3312b702 Iustin Pop
368 3312b702 Iustin Pop
369 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  # an empty/None list means "all instances in the cluster"
  if not instances:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for name in instances:
    expanded = lu.cfg.ExpandInstanceName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    wanted.append(expanded)
  return wanted
397 dcb93971 Michael Hanselmann
398 dcb93971 Michael Hanselmann
399 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @param selected: the list of fields requested by the caller
  @raise errors.OpPrereqError: if any selected field is unknown

  """
  all_fields = utils.FieldSet()
  all_fields.Extend(static)
  all_fields.Extend(dynamic)

  unknown = all_fields.NonMatching(selected)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown))
416 dcb93971 Michael Hanselmann
417 dcb93971 Michael Hanselmann
418 a5961235 Iustin Pop
def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  @param op: the opcode to check
  @param name: the attribute name to validate and (re)assign
  @raise errors.OpPrereqError: if the value is neither None nor a bool

  """
  val = getattr(op, name, None)
  # None is explicitly allowed; anything else must be a real boolean
  if val is not None and not isinstance(val, bool):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  # always (re)set the attribute so later code can rely on its presence
  setattr(op, name, val)
430 a5961235 Iustin Pop
431 a5961235 Iustin Pop
432 a5961235 Iustin Pop
def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  node_info = lu.cfg.GetNodeInfo(node)
  if node_info.offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)
442 a5961235 Iustin Pop
443 a5961235 Iustin Pop
444 733a2b6a Iustin Pop
def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  node_info = lu.cfg.GetNodeInfo(node)
  if node_info.drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node)
454 733a2b6a Iustin Pop
455 733a2b6a Iustin Pop
456 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, bridge, mac) representing
      the NICs the instance  has
  @rtype: dict
  @return: the hook environment for this instance

  """
  # translate the boolean run state into the up/down string the hooks expect
  if status:
    str_status = "up"
  else:
    str_status = "down"

  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  nic_count = 0
  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      # a missing IP is exported as the empty string, not "None"
      env["INSTANCE_NIC%d_IP" % idx] = ip if ip is not None else ""
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env
512 396e1b78 Michael Hanselmann
513 396e1b78 Michael Hanselmann
514 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  # resolve the effective backend parameters (cluster defaults + instance)
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  nic_list = [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics]
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': nic_list,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
543 396e1b78 Michael Hanselmann
544 396e1b78 Michael Hanselmann
545 ec0292f1 Iustin Pop
def _AdjustCandidatePool(lu):
546 ec0292f1 Iustin Pop
  """Adjust the candidate pool after node operations.
547 ec0292f1 Iustin Pop

548 ec0292f1 Iustin Pop
  """
549 ec0292f1 Iustin Pop
  mod_list = lu.cfg.MaintainCandidatePool()
550 ec0292f1 Iustin Pop
  if mod_list:
551 ec0292f1 Iustin Pop
    lu.LogInfo("Promoted nodes to master candidate role: %s",
552 ee513a66 Iustin Pop
               ", ".join(node.name for node in mod_list))
553 ec0292f1 Iustin Pop
    for name in mod_list:
554 ec0292f1 Iustin Pop
      lu.context.ReaddNode(name)
555 ec0292f1 Iustin Pop
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
556 ec0292f1 Iustin Pop
  if mc_now > mc_max:
557 ec0292f1 Iustin Pop
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
558 ec0292f1 Iustin Pop
               (mc_now, mc_max))
559 ec0292f1 Iustin Pop
560 ec0292f1 Iustin Pop
561 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
562 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
563 bf6929a2 Alexander Schreiber

564 bf6929a2 Alexander Schreiber
  """
565 bf6929a2 Alexander Schreiber
  # check bridges existance
566 bf6929a2 Alexander Schreiber
  brlist = [nic.bridge for nic in instance.nics]
567 781de953 Iustin Pop
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
568 781de953 Iustin Pop
  result.Raise()
569 781de953 Iustin Pop
  if not result.data:
570 781de953 Iustin Pop
    raise errors.OpPrereqError("One or more target bridges %s does not"
571 bf6929a2 Alexander Schreiber
                               " exist on destination node '%s'" %
572 bf6929a2 Alexander Schreiber
                               (brlist, instance.primary_node))
573 bf6929a2 Alexander Schreiber
574 bf6929a2 Alexander Schreiber
575 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies the cluster is empty: the master must be the only
    remaining node and no instances may be configured.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master_name = self.cfg.GetMasterNode()

    node_names = self.cfg.GetNodeList()
    if len(node_names) != 1 or node_names[0] != master_name:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(node_names) - 1))
    instance_names = self.cfg.GetInstanceList()
    if instance_names:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instance_names))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Disables the master role on the master node, backs up the Ganeti
    SSH key pair and returns the name of the master node.

    """
    master_name = self.cfg.GetMasterNode()
    stop_result = self.rpc.call_node_stop_master(master_name, False)
    stop_result.Raise()
    if not stop_result.data:
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)
    return master_name
613 a8083063 Iustin Pop
614 a8083063 Iustin Pop
615 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
616 a8083063 Iustin Pop
  """Verifies the cluster status.
617 a8083063 Iustin Pop

618 a8083063 Iustin Pop
  """
619 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
620 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
621 e54c4c5e Guido Trotter
  _OP_REQP = ["skip_checks"]
622 d4b9d97f Guido Trotter
  REQ_BGL = False
623 d4b9d97f Guido Trotter
624 d4b9d97f Guido Trotter
  def ExpandNames(self):
    """Declare the locks needed for cluster verification."""
    # all nodes and all instances are examined; share_locks value 1
    # requests every lock in shared mode at every level
    self.needed_locks = {}
    for level in (locking.LEVEL_NODE, locking.LEVEL_INSTANCE):
      self.needed_locks[level] = locking.ALL_SET
    self.share_locks = {}
    for level in locking.LEVELS:
      self.share_locks[level] = 1
630 a8083063 Iustin Pop
631 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status
    @rtype: boolean
    @return: True if the node fails verification, False otherwise

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    # the version entry must be a (protocol, release) pair
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    # on a protocol mismatch no other result can be interpreted, bail out
    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version; a mismatch is only a warning, not an error
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G

    vglist = node_result.get(constants.NV_VGLIST, None)
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        # files not listed in master_files must exist on every node
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        # NOTE: 'node' is shadowed below by the name of the peer node
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    # per-hypervisor verification results; a value of None means success
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    used_minors = node_result.get(constants.NV_DRBDLIST, [])
    if not isinstance(used_minors, (tuple, list)):
      feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
                  str(used_minors))
    else:
      # every minor which must exist per the config has to be in use...
      for minor, (iname, must_exist) in drbd_map.items():
        if minor not in used_minors and must_exist:
          feedback_fn("  - ERROR: drbd minor %d of instance %s is not active" %
                      (minor, iname))
          bad = True
      # ...and every minor in use has to be known to the config
      for minor in used_minors:
        if minor not in drbd_map:
          feedback_fn("  - ERROR: unallocated drbd minor %d is in use" % minor)
          bad = True

    return bad
775 a8083063 Iustin Pop
776 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
777 0a66c968 Iustin Pop
                      node_instance, feedback_fn, n_offline):
778 a8083063 Iustin Pop
    """Verify an instance.
779 a8083063 Iustin Pop

780 a8083063 Iustin Pop
    This function checks to see if the required block devices are
781 a8083063 Iustin Pop
    available on the instance's node.
782 a8083063 Iustin Pop

783 a8083063 Iustin Pop
    """
784 a8083063 Iustin Pop
    bad = False
785 a8083063 Iustin Pop
786 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
787 a8083063 Iustin Pop
788 a8083063 Iustin Pop
    node_vol_should = {}
789 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
790 a8083063 Iustin Pop
791 a8083063 Iustin Pop
    for node in node_vol_should:
792 0a66c968 Iustin Pop
      if node in n_offline:
793 0a66c968 Iustin Pop
        # ignore missing volumes on offline nodes
794 0a66c968 Iustin Pop
        continue
795 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
796 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
797 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
798 a8083063 Iustin Pop
                          (volume, node))
799 a8083063 Iustin Pop
          bad = True
800 a8083063 Iustin Pop
801 0d68c45d Iustin Pop
    if instanceconfig.admin_up:
802 0a66c968 Iustin Pop
      if ((node_current not in node_instance or
803 0a66c968 Iustin Pop
          not instance in node_instance[node_current]) and
804 0a66c968 Iustin Pop
          node_current not in n_offline):
805 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
806 a8083063 Iustin Pop
                        (instance, node_current))
807 a8083063 Iustin Pop
        bad = True
808 a8083063 Iustin Pop
809 a8083063 Iustin Pop
    for node in node_instance:
810 a8083063 Iustin Pop
      if (not node == node_current):
811 a8083063 Iustin Pop
        if instance in node_instance[node]:
812 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
813 a8083063 Iustin Pop
                          (instance, node))
814 a8083063 Iustin Pop
          bad = True
815 a8083063 Iustin Pop
816 6a438c98 Michael Hanselmann
    return bad
817 a8083063 Iustin Pop
818 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
819 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
820 a8083063 Iustin Pop

821 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
822 a8083063 Iustin Pop
    reported as unknown.
823 a8083063 Iustin Pop

824 a8083063 Iustin Pop
    """
825 a8083063 Iustin Pop
    bad = False
826 a8083063 Iustin Pop
827 a8083063 Iustin Pop
    for node in node_vol_is:
828 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
829 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
830 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
831 a8083063 Iustin Pop
                      (volume, node))
832 a8083063 Iustin Pop
          bad = True
833 a8083063 Iustin Pop
    return bad
834 a8083063 Iustin Pop
835 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
836 a8083063 Iustin Pop
    """Verify the list of running instances.
837 a8083063 Iustin Pop

838 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
839 a8083063 Iustin Pop

840 a8083063 Iustin Pop
    """
841 a8083063 Iustin Pop
    bad = False
842 a8083063 Iustin Pop
    for node in node_instance:
843 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
844 a8083063 Iustin Pop
        if runninginstance not in instancelist:
845 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
846 a8083063 Iustin Pop
                          (runninginstance, node))
847 a8083063 Iustin Pop
          bad = True
848 a8083063 Iustin Pop
    return bad
849 a8083063 Iustin Pop
850 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the
    instances it was primary for.

    @param node_info: per-node dictionary with free memory and the
        'sinst-by-pnode' secondary-instance mapping
    @param instance_cfg: dictionary of instance configuration objects
    @param feedback_fn: function used to accumulate results
    @rtype: boolean
    @return: True if some node cannot absorb a single-node failover

    """
    bad = False

    for node_name, ninfo in node_info.iteritems():
      # Each node acting as a secondary must be able to fit, within its
      # free memory, all instances of any single primary that could
      # fail over to it.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: down instances are counted as well, since someone
      # might want to start them even in the event of a node failure.
      for primary, inst_names in ninfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for inst_name in inst_names:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[inst_name])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if ninfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node_name, primary))
          bad = True
    return bad
879 2b3b6ddd Guido Trotter
880 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Transforms the list of checks to be skipped into a set and
    validates that every member is a known optional check.

    @raise errors.OpPrereqError: if an unknown check name was given

    """
    self.skip_set = frozenset(self.op.skip_checks)
    unknown_checks = self.skip_set - constants.VERIFY_OPTIONAL_CHECKS
    if unknown_checks:
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
890 a8083063 Iustin Pop
891 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
892 d8fff41c Guido Trotter
    """Build hooks env.
893 d8fff41c Guido Trotter

894 d8fff41c Guido Trotter
    Cluster-Verify hooks just rone in the post phase and their failure makes
895 d8fff41c Guido Trotter
    the output be logged in the verify output and the verification to fail.
896 d8fff41c Guido Trotter

897 d8fff41c Guido Trotter
    """
898 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
899 d8fff41c Guido Trotter
    # TODO: populate the environment with useful information for verify hooks
900 d8fff41c Guido Trotter
    env = {}
901 d8fff41c Guido Trotter
    return env, [], all_nodes
902 d8fff41c Guido Trotter
903 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
904 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
905 a8083063 Iustin Pop

906 a8083063 Iustin Pop
    """
907 a8083063 Iustin Pop
    bad = False
908 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
909 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
910 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
911 a8083063 Iustin Pop
912 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
913 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
914 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
915 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
916 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
917 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
918 6d2e83d5 Iustin Pop
                        for iname in instancelist)
919 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
920 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
921 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
922 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
923 a8083063 Iustin Pop
    node_volume = {}
924 a8083063 Iustin Pop
    node_instance = {}
925 9c9c7d30 Guido Trotter
    node_info = {}
926 26b6af5e Guido Trotter
    instance_cfg = {}
927 a8083063 Iustin Pop
928 a8083063 Iustin Pop
    # FIXME: verify OS list
929 a8083063 Iustin Pop
    # do local checksums
930 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
931 112f18a5 Iustin Pop
932 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
933 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
934 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
935 112f18a5 Iustin Pop
    file_names.extend(master_files)
936 112f18a5 Iustin Pop
937 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
938 a8083063 Iustin Pop
939 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
940 a8083063 Iustin Pop
    node_verify_param = {
941 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
942 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
943 82e37788 Iustin Pop
                              if not node.offline],
944 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
945 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
946 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
947 82e37788 Iustin Pop
                                 if not node.offline],
948 25361b9a Iustin Pop
      constants.NV_LVLIST: vg_name,
949 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
950 25361b9a Iustin Pop
      constants.NV_VGLIST: None,
951 25361b9a Iustin Pop
      constants.NV_VERSION: None,
952 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
953 6d2e83d5 Iustin Pop
      constants.NV_DRBDLIST: None,
954 a8083063 Iustin Pop
      }
955 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
956 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
957 a8083063 Iustin Pop
958 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
959 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
960 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
961 6d2e83d5 Iustin Pop
962 112f18a5 Iustin Pop
    for node_i in nodeinfo:
963 112f18a5 Iustin Pop
      node = node_i.name
964 25361b9a Iustin Pop
      nresult = all_nvinfo[node].data
965 25361b9a Iustin Pop
966 0a66c968 Iustin Pop
      if node_i.offline:
967 0a66c968 Iustin Pop
        feedback_fn("* Skipping offline node %s" % (node,))
968 0a66c968 Iustin Pop
        n_offline.append(node)
969 0a66c968 Iustin Pop
        continue
970 0a66c968 Iustin Pop
971 112f18a5 Iustin Pop
      if node == master_node:
972 25361b9a Iustin Pop
        ntype = "master"
973 112f18a5 Iustin Pop
      elif node_i.master_candidate:
974 25361b9a Iustin Pop
        ntype = "master candidate"
975 22f0f71d Iustin Pop
      elif node_i.drained:
976 22f0f71d Iustin Pop
        ntype = "drained"
977 22f0f71d Iustin Pop
        n_drained.append(node)
978 112f18a5 Iustin Pop
      else:
979 25361b9a Iustin Pop
        ntype = "regular"
980 112f18a5 Iustin Pop
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
981 25361b9a Iustin Pop
982 25361b9a Iustin Pop
      if all_nvinfo[node].failed or not isinstance(nresult, dict):
983 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
984 25361b9a Iustin Pop
        bad = True
985 25361b9a Iustin Pop
        continue
986 25361b9a Iustin Pop
987 6d2e83d5 Iustin Pop
      node_drbd = {}
988 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
989 6d2e83d5 Iustin Pop
        instance = instanceinfo[instance]
990 0d68c45d Iustin Pop
        node_drbd[minor] = (instance.name, instance.admin_up)
991 112f18a5 Iustin Pop
      result = self._VerifyNode(node_i, file_names, local_checksums,
992 6d2e83d5 Iustin Pop
                                nresult, feedback_fn, master_files,
993 6d2e83d5 Iustin Pop
                                node_drbd)
994 a8083063 Iustin Pop
      bad = bad or result
995 a8083063 Iustin Pop
996 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
997 25361b9a Iustin Pop
      if isinstance(lvdata, basestring):
998 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
999 26f15862 Iustin Pop
                    (node, utils.SafeEncode(lvdata)))
1000 b63ed789 Iustin Pop
        bad = True
1001 b63ed789 Iustin Pop
        node_volume[node] = {}
1002 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1003 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1004 a8083063 Iustin Pop
        bad = True
1005 a8083063 Iustin Pop
        continue
1006 b63ed789 Iustin Pop
      else:
1007 25361b9a Iustin Pop
        node_volume[node] = lvdata
1008 a8083063 Iustin Pop
1009 a8083063 Iustin Pop
      # node_instance
1010 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1011 25361b9a Iustin Pop
      if not isinstance(idata, list):
1012 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1013 25361b9a Iustin Pop
                    (node,))
1014 a8083063 Iustin Pop
        bad = True
1015 a8083063 Iustin Pop
        continue
1016 a8083063 Iustin Pop
1017 25361b9a Iustin Pop
      node_instance[node] = idata
1018 a8083063 Iustin Pop
1019 9c9c7d30 Guido Trotter
      # node_info
1020 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1021 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
1022 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1023 9c9c7d30 Guido Trotter
        bad = True
1024 9c9c7d30 Guido Trotter
        continue
1025 9c9c7d30 Guido Trotter
1026 9c9c7d30 Guido Trotter
      try:
1027 9c9c7d30 Guido Trotter
        node_info[node] = {
1028 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1029 25361b9a Iustin Pop
          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
1030 93e4c50b Guido Trotter
          "pinst": [],
1031 93e4c50b Guido Trotter
          "sinst": [],
1032 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1033 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1034 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1035 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1036 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1037 36e7da50 Guido Trotter
          # secondary.
1038 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1039 9c9c7d30 Guido Trotter
        }
1040 9c9c7d30 Guido Trotter
      except ValueError:
1041 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
1042 9c9c7d30 Guido Trotter
        bad = True
1043 9c9c7d30 Guido Trotter
        continue
1044 9c9c7d30 Guido Trotter
1045 a8083063 Iustin Pop
    node_vol_should = {}
1046 a8083063 Iustin Pop
1047 a8083063 Iustin Pop
    for instance in instancelist:
1048 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1049 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1050 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1051 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1052 c5705f58 Guido Trotter
      bad = bad or result
1053 832261fd Iustin Pop
      inst_nodes_offline = []
1054 a8083063 Iustin Pop
1055 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1056 a8083063 Iustin Pop
1057 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1058 26b6af5e Guido Trotter
1059 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1060 93e4c50b Guido Trotter
      if pnode in node_info:
1061 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1062 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1063 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1064 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1065 93e4c50b Guido Trotter
        bad = True
1066 93e4c50b Guido Trotter
1067 832261fd Iustin Pop
      if pnode in n_offline:
1068 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1069 832261fd Iustin Pop
1070 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1071 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1072 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1073 93e4c50b Guido Trotter
      # supported either.
1074 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1075 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1076 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1077 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1078 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1079 93e4c50b Guido Trotter
                    % instance)
1080 93e4c50b Guido Trotter
1081 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1082 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1083 3924700f Iustin Pop
1084 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1085 93e4c50b Guido Trotter
        if snode in node_info:
1086 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1087 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1088 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1089 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1090 0a66c968 Iustin Pop
        elif snode not in n_offline:
1091 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1092 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1093 832261fd Iustin Pop
          bad = True
1094 832261fd Iustin Pop
        if snode in n_offline:
1095 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1096 832261fd Iustin Pop
1097 832261fd Iustin Pop
      if inst_nodes_offline:
1098 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1099 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1100 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1101 832261fd Iustin Pop
        bad = True
1102 93e4c50b Guido Trotter
1103 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1104 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1105 a8083063 Iustin Pop
                                       feedback_fn)
1106 a8083063 Iustin Pop
    bad = bad or result
1107 a8083063 Iustin Pop
1108 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1109 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1110 a8083063 Iustin Pop
                                         feedback_fn)
1111 a8083063 Iustin Pop
    bad = bad or result
1112 a8083063 Iustin Pop
1113 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1114 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1115 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1116 e54c4c5e Guido Trotter
      bad = bad or result
1117 2b3b6ddd Guido Trotter
1118 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1119 2b3b6ddd Guido Trotter
    if i_non_redundant:
1120 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1121 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1122 2b3b6ddd Guido Trotter
1123 3924700f Iustin Pop
    if i_non_a_balanced:
1124 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1125 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1126 3924700f Iustin Pop
1127 0a66c968 Iustin Pop
    if n_offline:
1128 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1129 0a66c968 Iustin Pop
1130 22f0f71d Iustin Pop
    if n_drained:
1131 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1132 22f0f71d Iustin Pop
1133 34290825 Michael Hanselmann
    return not bad
1134 a8083063 Iustin Pop
1135 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results; for any other phase we fall through and implicitly
    # return None
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        # the multi-node call returned nothing at all, so we cannot even
        # tell which node failed
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res.failed or res.data is False or not isinstance(res.data, list):
            if res.offline:
              # no need to warn or set fail return value
              continue
            feedback_fn("    Communication failure in hooks execution")
            lu_result = 1
            continue
          # res.data is expected to be a list of (script, status, output)
          # tuples -- TODO confirm against the hooks rpc implementation
          for script, hkr, output in res.data:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              # indent the hook's own output under the error line
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result
1183 d8fff41c Guido Trotter
1184 a8083063 Iustin Pop
1185 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # shared locks on all nodes and instances: we only read their state
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @return: a 4-tuple of (unreachable nodes, per-node LV enumeration
        errors, instances with an offline LV, missing LVs per instance)

    """
    # the four names below alias the members of the returned tuple
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # map of (node, lv_name) -> instance for all LVs belonging to
    # running, network-mirrored instances
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]
      if lvs.failed:
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
        # a string result is an error message, not LV data; without this
        # 'continue' we would crash below iterating the string as a dict
        continue
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1268 2c95a8d4 Iustin Pop
1269 2c95a8d4 Iustin Pop
1270 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    # both pre- and post-hooks run on the master node only
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    # resolve the requested name into a (name, ip) pair
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # refuse to take over an IP that already answers on the network
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    # store the fully-resolved name for Exec
    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      # push it to all nodes except the master, which was written locally
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          # best-effort: log the failure but don't abort the rename
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)

    finally:
      # always try to restore the master role, even if the rename failed
      result = self.rpc.call_node_start_master(master, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")
1348 07bd8a51 Iustin Pop
1349 07bd8a51 Iustin Pop
1350 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check whether a disk, or any of its children, is LVM-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: True iff the disk or one of its descendants has an LD_LV
      dev_type

  """
  # recurse into the children (if any) first
  for chdisk in (disk.children or []):
    if _RecursiveCheckIfLVMBased(chdisk):
      return True
  # no LVM-based child found; the answer depends on this disk itself
  return disk.dev_type == constants.LD_LV
1364 8084f9f6 Manuel Franceschini
1365 8084f9f6 Manuel Franceschini
1366 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckParameters(self):
    """Check parameters

    Normalizes the optional candidate_pool_size opcode attribute: missing
    becomes None, otherwise it must convert to a positive integer.

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    # both pre- and post-hooks run on the master node only
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # an empty (but not None) vg_name means "disable lvm storage", which
    # is only allowed if no lvm-based instances exist
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      # merge the requested changes over the current cluster defaults
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      if self.op.vg_name != self.cfg.GetVGName():
        self.cfg.SetVGName(self.op.vg_name)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    # apply the values pre-computed/validated in CheckPrereq
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size

    self.cfg.Update(self.cluster)

    # we want to update nodes after the cluster so that if any errors
    # happen, we have recorded and saved the cluster info
    if self.op.candidate_pool_size is not None:
      _AdjustCandidatePool(self)
1501 4b7735f9 Iustin Pop
1502 8084f9f6 Manuel Franceschini
1503 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # the configuration touches every node, so take a shared lock on all
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    There is nothing to check for this LU.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    # re-saving the (unchanged) cluster object forces the configuration
    # to be written out again
    cluster_info = self.cfg.GetClusterInfo()
    self.cfg.Update(cluster_info)
1528 afee0879 Iustin Pop
1529 afee0879 Iustin Pop
1530 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  @param lu: the logical unit on whose behalf we poll (provides cfg,
      rpc and logging facilities)
  @param instance: the instance whose disks we wait on
  @param oneshot: if True, poll and report only once instead of looping
      until the disks are in sync
  @param unlock: currently unused in this function
  @return: True if no disk is left in a degraded state

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  # give up only after 10 *consecutive* failed status calls
  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    # a successful call resets the consecutive-failure counter
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # degraded with no progress percentage counts as persistent degradation
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # sleep up to the longest reported estimate, capped at one minute;
    # NOTE(review): if no estimate was reported max_time is 0 and this
    # sleeps 0 seconds -- confirm the tight re-poll is intentional
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1586 a8083063 Iustin Pop
1587 a8083063 Iustin Pop
1588 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  @param lu: the logical unit on whose behalf we check (provides cfg,
      rpc and logging facilities)
  @param dev: the disk (device tree) to check
  @param node: the node to query
  @param on_primary: whether the disk is assembled on its primary node
  @param ldisk: select which payload flag to test (see above)
  @return: True if the device (and its children) appear consistent

  """
  lu.cfg.SetDiskID(dev, node)
  # index into the blockdev_find payload: per the docstring, 5 is the
  # overall is_degraded flag and 6 the local-storage (ldisk) status
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.RemoteFailMsg()
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      # empty payload means the device was not found on the node
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      # consistent only if the selected degradation flag is unset
      result = result and (not rstats.payload[idx])
  if dev.children:
    for child in dev.children:
      # NOTE(review): ldisk is not propagated here, so children are always
      # checked against is_degraded -- confirm this is intentional
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result
1619 a8083063 Iustin Pop
1620 a8083063 Iustin Pop
1621 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  Collects the per-node OS listings (via the os_diagnose RPC) and
  presents them per-OS, per-node.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  # no static fields: every field requires data gathered from the nodes
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    # querying a subset of OS names is not implemented
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # nothing to verify here; the output fields were already validated
    # in ExpandNames

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @returns: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      # skip nodes whose RPC failed or which returned no data
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    @return: a list of rows, one per OS, each row holding the requested
        output fields in order

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    # only query nodes that are both locked and marked online
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
                   if node in node_list]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # true only if every node returned a non-empty list whose first
          # OS object is truthy
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          # per-node list of (status, path) pairs for each OS definition
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1707 a8083063 Iustin Pop
1708 a8083063 Iustin Pop
1709 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    @rtype: tuple
    @return: (env, pre-hook nodes, post-hook nodes); the node being
        removed is excluded from both hook node lists

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # FIX: use the call form of raise, consistent with the rest of this
      # module (the legacy "raise Class, args" form is Python-2-only)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    # refuse removal while any instance still uses this node as primary
    # or secondary
    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    # store the canonical (expanded) name for later phases
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    # drop the node from the cluster configuration/context first, then
    # tell the node itself to leave
    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)
1776 eb1742d5 Guido Trotter
1777 a8083063 Iustin Pop
1778 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields that require live data from the nodes (node_info RPC)
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  # fields that can be answered from the configuration alone
  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    "drained",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # node locks are only needed when live (dynamic) data was requested
    # and the caller asked for locking
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    @return: a list of rows, one per node (sorted by name), each row
        holding the requested output fields in order

    """
    all_info = self.cfg.GetAllNodesInfo()
    # determine the node name list: from the acquired locks, from the
    # explicitly requested names, or all known nodes
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          # best-effort conversion: leaves the value untouched on failure
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          # RPC failed for this node: report empty live data
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    # reverse maps: node name -> set of instance names
    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    # only walk the instance list if instance-related fields were asked for
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif field == "drained":
          val = node.drained
        elif self._FIELDS_DYNAMIC.Matches(field):
          # dynamic fields come from the live data, None if unavailable
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1934 a8083063 Iustin Pop
1935 a8083063 Iustin Pop
1936 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    # lock either the explicitly requested nodes or all of them
    if self.op.nodes:
      node_locks = _GetWantedNodes(self, self.op.nodes)
    else:
      node_locks = locking.ALL_SET
    self.needed_locks[locking.LEVEL_NODE] = node_locks

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # operate on exactly the nodes we managed to lock
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    node_names = self.nodes
    volumes = self.rpc.call_node_volumes(node_names)

    # instance objects plus their LV-per-node maps, used to attribute
    # each volume to the instance owning it
    instances = [self.cfg.GetInstanceInfo(iname) for iname
                 in self.cfg.GetInstanceList()]
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in instances])

    output = []
    for node in node_names:
      # skip nodes with no answer, a failed RPC or empty volume data
      if node not in volumes:
        continue
      nresult = volumes[node]
      if nresult.failed or not nresult.data:
        continue

      vol_list = nresult.data[:]
      vol_list.sort(key=lambda entry: entry['dev'])

      for vol in vol_list:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this LV on this node, '-' if none
            val = '-'
            for inst in instances:
              node_lvs = lv_by_node[inst]
              if node in node_lvs and vol['name'] in node_lvs[node]:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
2015 dcb93971 Michael Hanselmann
2016 dcb93971 Michael Hanselmann
2017 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
2018 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
2019 a8083063 Iustin Pop

2020 a8083063 Iustin Pop
  """
2021 a8083063 Iustin Pop
  HPATH = "node-add"
2022 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2023 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2024 a8083063 Iustin Pop
2025 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2026 a8083063 Iustin Pop
    """Build hooks env.
2027 a8083063 Iustin Pop

2028 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
2029 a8083063 Iustin Pop

2030 a8083063 Iustin Pop
    """
2031 a8083063 Iustin Pop
    env = {
2032 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2033 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
2034 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
2035 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
2036 a8083063 Iustin Pop
      }
2037 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
2038 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
2039 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
2040 a8083063 Iustin Pop
2041 a8083063 Iustin Pop
  def CheckPrereq(self):
2042 a8083063 Iustin Pop
    """Check prerequisites.
2043 a8083063 Iustin Pop

2044 a8083063 Iustin Pop
    This checks:
2045 a8083063 Iustin Pop
     - the new node is not already in the config
2046 a8083063 Iustin Pop
     - it is resolvable
2047 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
2048 a8083063 Iustin Pop

2049 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
2050 a8083063 Iustin Pop

2051 a8083063 Iustin Pop
    """
2052 a8083063 Iustin Pop
    node_name = self.op.node_name
2053 a8083063 Iustin Pop
    cfg = self.cfg
2054 a8083063 Iustin Pop
2055 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
2056 a8083063 Iustin Pop
2057 bcf043c9 Iustin Pop
    node = dns_data.name
2058 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
2059 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
2060 a8083063 Iustin Pop
    if secondary_ip is None:
2061 a8083063 Iustin Pop
      secondary_ip = primary_ip
2062 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
2063 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
2064 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
2065 e7c6e02b Michael Hanselmann
2066 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
2067 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
2068 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
2069 e7c6e02b Michael Hanselmann
                                 node)
2070 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
2071 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2072 a8083063 Iustin Pop
2073 a8083063 Iustin Pop
    for existing_node_name in node_list:
2074 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
2075 e7c6e02b Michael Hanselmann
2076 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
2077 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
2078 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
2079 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
2080 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
2081 e7c6e02b Michael Hanselmann
        continue
2082 e7c6e02b Michael Hanselmann
2083 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
2084 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
2085 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
2086 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
2087 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
2088 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
2089 a8083063 Iustin Pop
2090 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
2091 a8083063 Iustin Pop
    # same as for the master
2092 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2093 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
2094 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
2095 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
2096 a8083063 Iustin Pop
      if master_singlehomed:
2097 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
2098 3ecf6786 Iustin Pop
                                   " new node has one")
2099 a8083063 Iustin Pop
      else:
2100 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
2101 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
2102 a8083063 Iustin Pop
2103 a8083063 Iustin Pop
    # checks reachablity
2104 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2105 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
2106 a8083063 Iustin Pop
2107 a8083063 Iustin Pop
    if not newbie_singlehomed:
2108 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
2109 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2110 b15d625f Iustin Pop
                           source=myself.secondary_ip):
2111 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2112 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
2113 a8083063 Iustin Pop
2114 0fff97e9 Guido Trotter
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2115 ec0292f1 Iustin Pop
    mc_now, _ = self.cfg.GetMasterCandidateStats()
2116 ec0292f1 Iustin Pop
    master_candidate = mc_now < cp_size
2117 0fff97e9 Guido Trotter
2118 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
2119 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
2120 0fff97e9 Guido Trotter
                                 secondary_ip=secondary_ip,
2121 fc0fe88c Iustin Pop
                                 master_candidate=master_candidate,
2122 af64c0ea Iustin Pop
                                 offline=False, drained=False)
2123 a8083063 Iustin Pop
2124 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2125 a8083063 Iustin Pop
    """Adds the new node to the cluster.
2126 a8083063 Iustin Pop

2127 a8083063 Iustin Pop
    """
2128 a8083063 Iustin Pop
    new_node = self.new_node
2129 a8083063 Iustin Pop
    node = new_node.name
2130 a8083063 Iustin Pop
2131 a8083063 Iustin Pop
    # check connectivity
2132 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
2133 781de953 Iustin Pop
    result.Raise()
2134 781de953 Iustin Pop
    if result.data:
2135 781de953 Iustin Pop
      if constants.PROTOCOL_VERSION == result.data:
2136 9a4f63d1 Iustin Pop
        logging.info("Communication to node %s fine, sw version %s match",
2137 781de953 Iustin Pop
                     node, result.data)
2138 a8083063 Iustin Pop
      else:
2139 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
2140 3ecf6786 Iustin Pop
                                 " node version %s" %
2141 781de953 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result.data))
2142 a8083063 Iustin Pop
    else:
2143 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
2144 a8083063 Iustin Pop
2145 a8083063 Iustin Pop
    # setup ssh on node
2146 9a4f63d1 Iustin Pop
    logging.info("Copy ssh key to node %s", node)
2147 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2148 a8083063 Iustin Pop
    keyarray = []
2149 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2150 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2151 70d9e3d8 Iustin Pop
                priv_key, pub_key]
2152 a8083063 Iustin Pop
2153 a8083063 Iustin Pop
    for i in keyfiles:
2154 a8083063 Iustin Pop
      f = open(i, 'r')
2155 a8083063 Iustin Pop
      try:
2156 a8083063 Iustin Pop
        keyarray.append(f.read())
2157 a8083063 Iustin Pop
      finally:
2158 a8083063 Iustin Pop
        f.close()
2159 a8083063 Iustin Pop
2160 72737a7f Iustin Pop
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2161 72737a7f Iustin Pop
                                    keyarray[2],
2162 72737a7f Iustin Pop
                                    keyarray[3], keyarray[4], keyarray[5])
2163 a8083063 Iustin Pop
2164 a1b805fb Iustin Pop
    msg = result.RemoteFailMsg()
2165 a1b805fb Iustin Pop
    if msg:
2166 a1b805fb Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the"
2167 a1b805fb Iustin Pop
                               " new node: %s" % msg)
2168 a8083063 Iustin Pop
2169 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
2170 d9c02ca6 Michael Hanselmann
    utils.AddHostToEtcHosts(new_node.name)
2171 c8a0948f Michael Hanselmann
2172 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
2173 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
2174 781de953 Iustin Pop
                                                 new_node.secondary_ip)
2175 781de953 Iustin Pop
      if result.failed or not result.data:
2176 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2177 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
2178 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
2179 a8083063 Iustin Pop
2180 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
2181 5c0527ed Guido Trotter
    node_verify_param = {
2182 5c0527ed Guido Trotter
      'nodelist': [node],
2183 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
2184 5c0527ed Guido Trotter
    }
2185 5c0527ed Guido Trotter
2186 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2187 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
2188 5c0527ed Guido Trotter
    for verifier in node_verify_list:
2189 f08ce603 Guido Trotter
      if result[verifier].failed or not result[verifier].data:
2190 5c0527ed Guido Trotter
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
2191 5c0527ed Guido Trotter
                                 " for remote verification" % verifier)
2192 781de953 Iustin Pop
      if result[verifier].data['nodelist']:
2193 781de953 Iustin Pop
        for failed in result[verifier].data['nodelist']:
2194 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
2195 bafc1d90 Iustin Pop
                      (verifier, result[verifier].data['nodelist'][failed]))
2196 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
2197 ff98055b Iustin Pop
2198 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
2199 a8083063 Iustin Pop
    # including the node just added
2200 d6a02168 Michael Hanselmann
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
2201 102b115b Michael Hanselmann
    dist_nodes = self.cfg.GetNodeList()
2202 102b115b Michael Hanselmann
    if not self.op.readd:
2203 102b115b Michael Hanselmann
      dist_nodes.append(node)
2204 a8083063 Iustin Pop
    if myself.name in dist_nodes:
2205 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
2206 a8083063 Iustin Pop
2207 9a4f63d1 Iustin Pop
    logging.debug("Copying hosts and known_hosts to all nodes")
2208 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
2209 72737a7f Iustin Pop
      result = self.rpc.call_upload_file(dist_nodes, fname)
2210 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
2211 ec85e3d5 Iustin Pop
        if to_result.failed or not to_result.data:
2212 9a4f63d1 Iustin Pop
          logging.error("Copy of file %s to node %s failed", fname, to_node)
2213 a8083063 Iustin Pop
2214 d6a02168 Michael Hanselmann
    to_copy = []
2215 2928f08d Guido Trotter
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2216 ccd905ac Guido Trotter
    if constants.HTS_COPY_VNC_PASSWORD.intersection(enabled_hypervisors):
2217 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
2218 2928f08d Guido Trotter
2219 a8083063 Iustin Pop
    for fname in to_copy:
2220 72737a7f Iustin Pop
      result = self.rpc.call_upload_file([node], fname)
2221 781de953 Iustin Pop
      if result[node].failed or not result[node]:
2222 9a4f63d1 Iustin Pop
        logging.error("Could not copy file %s to node %s", fname, node)
2223 a8083063 Iustin Pop
2224 d8470559 Michael Hanselmann
    if self.op.readd:
2225 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
2226 d8470559 Michael Hanselmann
    else:
2227 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
2228 a8083063 Iustin Pop
2229 a8083063 Iustin Pop
2230 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  Supported modifications are the boolean node flags master_candidate,
  offline and drained. At most one of them may be set to True in a
  single opcode; setting one flag auto-adjusts the others where needed
  (e.g. offlining a node demotes it from master candidate).

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    """Validate and canonicalize the opcode arguments.

    @raise errors.OpPrereqError: if the node name is invalid, if no
        modification was requested, or if more than one flag is being
        set to True at the same time

    """
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    # normalize the three optional flags to True/False/None
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      raise errors.OpPrereqError("Please pass at least one modification")
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time")

  def ExpandNames(self):
    # we only need a lock on the target node itself
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the requested flag changes are valid for the
    target node: the master node cannot be demoted/offlined/drained,
    demotions must not shrink the candidate pool below its configured
    size (unless forced), and an offline/drained node cannot be
    promoted to master candidate without clearing that state.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if ((self.op.master_candidate == False or self.op.offline == True or
         self.op.drained == True) and node.master_candidate):
      # we will demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate, online and not drained")
      # refuse to shrink the candidate pool below its desired size,
      # unless the operation is forced (then only warn)
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      # FIX: the original message left the '%s' placeholder unfilled
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name)

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    @return: a list of (parameter, new value) pairs describing the
        changes that were applied

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        # an offline node cannot be a master candidate nor drained
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        # ask the node to drop its master-candidate state (best-effort)
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        # a drained node cannot be a master candidate nor offline
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result
2355 b31c8676 Iustin Pop
2356 b31c8676 Iustin Pop
2357 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  Returns a static snapshot of the cluster configuration (versions,
  name, master node, hypervisor settings, etc.).

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # reading the cluster config needs no locks at all
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    # expose only the parameters of the enabled hypervisors
    hv_params = {}
    for hypervisor in cluster.enabled_hypervisors:
      hv_params[hypervisor] = cluster.hvparams[hypervisor]
    return {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": hv_params,
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      }
2396 a8083063 Iustin Pop
2397 a8083063 Iustin Pop
2398 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  Answers queries for a small set of static configuration fields
  (cluster name, master node, queue drain flag).

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    # no locks needed; validate the requested fields early
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        values.append(self.cfg.GetClusterName())
      elif field == "master_node":
        values.append(self.cfg.GetMasterNode())
      elif field == "drain_flag":
        # the drain flag is signalled by the existence of a marker file
        values.append(os.path.exists(constants.JOB_QUEUE_DRAIN_FILE))
      else:
        raise errors.ParameterError(field)
    return values
2436 a8083063 Iustin Pop
2437 a8083063 Iustin Pop
2438 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    # the disks live on the primary node, so it must be reachable
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    assembled, disks_info = _AssembleInstanceDisks(self, self.instance)
    if not assembled:
      raise errors.OpExecError("Cannot activate block devices")
    return disks_info
2474 a8083063 Iustin Pop
2475 a8083063 Iustin Pop
2476 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: a (disks_ok, device_info) tuple; disks_ok is False if any
      non-ignored assembly failed, True otherwise; device_info is a
      list of (node, instance_visible_name, payload) triples for the
      primary node, where payload is the result of the primary-node
      assemble call (presumably the node-visible device path — confirm
      against the blockdev_assemble rpc definition)

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.RemoteFailMsg()
      if msg:
        # failures here are only fatal when secondaries must succeed
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
    # NOTE(review): 'result' here is the last primary-node assemble call
    # for this disk; its payload is appended even if that call failed
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        result.payload))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2543 a8083063 Iustin Pop
2544 a8083063 Iustin Pop
2545 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  Assembles the instance's block devices; if that fails, shuts down
  whatever was brought up and raises an error.

  """
  assembled, _ = _AssembleInstanceDisks(lu, instance,
                                        ignore_secondaries=force)
  if assembled:
    return
  # roll back any partially assembled devices before failing
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.proc.LogWarning("", hint="If the message above refers to a"
                       " secondary node,"
                       " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2558 fe7b0351 Michael Hanselmann
2559 fe7b0351 Michael Hanselmann
2560 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    # refuses to act if the instance is still running
    _SafeShutdownInstanceDisks(self, self.instance)
2592 a8083063 Iustin Pop
2593 a8083063 Iustin Pop
2594 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  pnode = instance.primary_node
  running = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  if running.failed or not isinstance(running.data, list):
    raise errors.OpExecError("Can't contact node '%s'" % pnode)

  if instance.name in running.data:
    # shutting down disks under a live instance would corrupt it
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2613 a8083063 Iustin Pop
2614 a8083063 Iustin Pop
2615 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored
  for the purpose of the return value (they are still logged); errors
  on any other node always make the function return False.

  @return: True if all (non-ignored) shutdowns succeeded, False
      otherwise

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.RemoteFailMsg()
      if msg:
        # failures are logged but never abort the loop: we still try
        # to shut down the remaining devices
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result
2636 a8083063 Iustin Pop
2637 a8083063 Iustin Pop
2638 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function check if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raise an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_data = lu.rpc.call_node_info([node], lu.cfg.GetVGName(),
                                    hypervisor_name)[node]
  node_data.Raise()
  free_mem = node_data.data.get('memory_free')
  # a missing or non-integer value means the node info is unusable
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                             " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                             " needed %s MiB, available %s MiB" %
                             (node, reason, requested, free_mem))
2670 d4f16fd9 Iustin Pop
2671 d4f16fd9 Iustin Pop
2672 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    hook_env = {"FORCE": self.op.force}
    hook_env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    node_list = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return hook_env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.instance = instance
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # the primary node must be reachable to carry out the start
    _CheckNodeOnline(self, instance.primary_node)

    be_params = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existance
    _CheckInstanceBridgesExist(self, instance)

    # verify the primary node can accommodate the instance's memory
    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         be_params[constants.BE_MEMORY],
                         instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    extra_args = getattr(self.op, "extra_args", "")

    # record the new desired state before actually issuing the start
    self.cfg.MarkInstanceUp(instance.name)

    _StartInstanceDisks(self, instance, self.op.force)

    start_result = self.rpc.call_instance_start(instance.primary_node,
                                                instance, extra_args)
    fail_msg = start_result.RemoteFailMsg()
    if fail_msg:
      # roll back the disk activation before reporting the failure
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % fail_msg)
2736 a8083063 Iustin Pop
2737 a8083063 Iustin Pop
2738 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  Soft and hard reboots are delegated to the node daemon in a single
  RPC call; a full reboot is implemented here as an explicit shutdown
  followed by a disk restart and an instance start.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    # validate the reboot type before acquiring any locks
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # the primary node must be reachable for any reboot variant
    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existance
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard: the node daemon performs the reboot itself; refresh
      # the disks' physical IDs for the primary node before the call
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type, extra_args)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not reboot instance: %s" % msg)
    else:
      # full reboot: stop the instance, cycle its disks, start it again
      result = self.rpc.call_instance_shutdown(node_current, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance for"
                                 " full reboot: %s" % msg)
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, extra_args)
      msg = result.RemoteFailMsg()
      if msg:
        # roll back the disk activation before reporting the failure
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    # the instance is (still) running; record that in the configuration
    self.cfg.MarkInstanceUp(instance.name)
2821 bf6929a2 Alexander Schreiber
2822 bf6929a2 Alexander Schreiber
2823 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    hook_env = _BuildInstanceHookEnvByObject(self, self.instance)
    node_list = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return hook_env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    # the primary node must be reachable to carry out the shutdown
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    primary = instance.primary_node
    # record the new desired state first, then ask the node to stop it
    self.cfg.MarkInstanceDown(instance.name)
    shutdown_result = self.rpc.call_instance_shutdown(primary, instance)
    fail_msg = shutdown_result.RemoteFailMsg()
    if fail_msg:
      # a failed OS-level shutdown only warns; the disks are shut
      # down regardless
      self.proc.LogWarning("Could not shutdown instance: %s" % fail_msg)

    _ShutdownInstanceDisks(self, instance)
2869 a8083063 Iustin Pop
2870 a8083063 Iustin Pop
2871 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  Re-runs the OS creation scripts for a stopped instance, optionally
  switching it to a different OS first.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    @raise errors.OpPrereqError: if the instance is diskless, marked up,
        actually running on its primary node, or (when an OS change is
        requested) the target OS is not available on the primary node

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked down, the instance must not actually be running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info.failed or remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: this opcode has no 'pnode' attribute; referencing
        # self.op.pnode here used to raise AttributeError instead of
        # the intended OpPrereqError
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    # the OS scripts need the disks activated; always deactivate them
    # again afterwards, even on failure
    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      _ShutdownInstanceDisks(self, inst)
2956 fe7b0351 Michael Hanselmann
2957 fe7b0351 Michael Hanselmann
2958 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and is not already taken (by another
    instance, or — unless ignore_ip is set — by a live IP).

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked down, the instance must not actually be running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    # normalize to the resolved host name
    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      # remember the old storage directory so it can be renamed later
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      # file-based disks live under a per-instance directory which must
      # be renamed on the primary node as well
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    # the OS rename script needs the disks activated; always deactivate
    # them again afterwards, even on failure
    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        # a failed OS rename script is only warned about, since the
        # configuration-level rename has already happened
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
3067 decd5f45 Iustin Pop
3068 decd5f45 Iustin Pop
3069 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs only on the master node.

    """
    hook_env = _BuildInstanceHookEnvByObject(self, self.instance)
    node_list = [self.cfg.GetMasterNode()]
    return hook_env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    primary = instance.primary_node
    ignore_failures = self.op.ignore_failures

    logging.info("Shutting down instance %s on node %s",
                 instance.name, primary)

    shutdown_result = self.rpc.call_instance_shutdown(primary, instance)
    fail_msg = shutdown_result.RemoteFailMsg()
    if fail_msg:
      if not ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, primary, fail_msg))
      feedback_fn("Warning: can't shutdown instance: %s" % fail_msg)

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if not ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    # drop the now-stale instance lock once the LU finishes
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
3137 a8083063 Iustin Pop
3138 a8083063 Iustin Pop
3139 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  Static fields (_FIELDS_STATIC) can be answered from the cluster
  configuration alone; dynamic fields (_FIELDS_DYNAMIC) require a
  live RPC to the instances' primary nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields answerable from config; includes regex patterns for
  # per-disk/per-nic indexed fields such as disk.size/0 or nic.mac/1
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  # fields that need live data from the nodes
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")

  def ExpandNames(self):
    """Compute the wanted instance set and (optionally) the locks to take."""
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # shared locks are enough for a read-only query
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed when dynamic fields were requested AND the
    # caller asked for it
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire node locks for the locked instances, if locking is active."""
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    Returns a list of rows, one per instance, each row holding the
    requested output fields in order.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      # query the primary nodes for live per-instance data
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
            # else no instance is alive
    else:
      # only static fields requested: fake empty live data
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      # hypervisor/backend params with cluster defaults filled in
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          # None if the primary node couldn't be queried
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin/operational status; offline beats nodedown
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # legacy names: map 'a'/'b' to disk index 0/1
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
3375 a8083063 Iustin Pop
3376 a8083063 Iustin Pop
3377 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  Shuts the instance down on its current primary node and restarts it
  on its (network-mirrored) secondary node.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    """Lock the instance; node locks are computed from it later."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # failover only makes sense for network-mirrored disk templates
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      # with ignore_consistency we proceed even if the source node
      # couldn't shut the instance down (e.g. node is dead)
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        # roll back disk activation before aborting
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
3509 a8083063 Iustin Pop
3510 a8083063 Iustin Pop
3511 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
3512 53c776b5 Iustin Pop
  """Migrate an instance.
3513 53c776b5 Iustin Pop

3514 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
3515 53c776b5 Iustin Pop
  which is done with shutdown.
3516 53c776b5 Iustin Pop

3517 53c776b5 Iustin Pop
  """
3518 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
3519 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3520 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
3521 53c776b5 Iustin Pop
3522 53c776b5 Iustin Pop
  REQ_BGL = False
3523 53c776b5 Iustin Pop
3524 53c776b5 Iustin Pop
  def ExpandNames(self):
3525 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
3526 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
3527 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3528 53c776b5 Iustin Pop
3529 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
3530 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
3531 53c776b5 Iustin Pop
      self._LockInstancesNodes()
3532 53c776b5 Iustin Pop
3533 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
3534 53c776b5 Iustin Pop
    """Build hooks env.
3535 53c776b5 Iustin Pop

3536 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3537 53c776b5 Iustin Pop

3538 53c776b5 Iustin Pop
    """
3539 53c776b5 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3540 53c776b5 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3541 53c776b5 Iustin Pop
    return env, nl, nl
3542 53c776b5 Iustin Pop
3543 53c776b5 Iustin Pop
  def CheckPrereq(self):
3544 53c776b5 Iustin Pop
    """Check prerequisites.
3545 53c776b5 Iustin Pop

3546 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
3547 53c776b5 Iustin Pop

3548 53c776b5 Iustin Pop
    """
3549 53c776b5 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3550 53c776b5 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3551 53c776b5 Iustin Pop
    if instance is None:
3552 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3553 53c776b5 Iustin Pop
                                 self.op.instance_name)
3554 53c776b5 Iustin Pop
3555 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
3556 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3557 53c776b5 Iustin Pop
                                 " drbd8, cannot migrate.")
3558 53c776b5 Iustin Pop
3559 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
3560 53c776b5 Iustin Pop
    if not secondary_nodes:
3561 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
3562 733a2b6a Iustin Pop
                                      " drbd8 disk template")
3563 53c776b5 Iustin Pop
3564 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
3565 53c776b5 Iustin Pop
3566 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
3567 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
3568 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
3569 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
3570 53c776b5 Iustin Pop
                         instance.hypervisor)
3571 53c776b5 Iustin Pop
3572 53c776b5 Iustin Pop
    # check bridge existance
3573 53c776b5 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
3574 53c776b5 Iustin Pop
    result = self.rpc.call_bridges_exist(target_node, brlist)
3575 53c776b5 Iustin Pop
    if result.failed or not result.data:
3576 53c776b5 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
3577 53c776b5 Iustin Pop
                                 " exist on destination node '%s'" %
3578 53c776b5 Iustin Pop
                                 (brlist, target_node))
3579 53c776b5 Iustin Pop
3580 53c776b5 Iustin Pop
    if not self.op.cleanup:
3581 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
3582 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
3583 53c776b5 Iustin Pop
                                                 instance)
3584 53c776b5 Iustin Pop
      msg = result.RemoteFailMsg()
3585 53c776b5 Iustin Pop
      if msg:
3586 53c776b5 Iustin Pop
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
3587 53c776b5 Iustin Pop
                                   msg)
3588 53c776b5 Iustin Pop
3589 53c776b5 Iustin Pop
    self.instance = instance
3590 53c776b5 Iustin Pop
3591 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
3592 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
3593 53c776b5 Iustin Pop

3594 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
3595 53c776b5 Iustin Pop

3596 53c776b5 Iustin Pop
    """
3597 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
3598 53c776b5 Iustin Pop
    all_done = False
3599 53c776b5 Iustin Pop
    while not all_done:
3600 53c776b5 Iustin Pop
      all_done = True
3601 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
3602 53c776b5 Iustin Pop
                                            self.nodes_ip,
3603 53c776b5 Iustin Pop
                                            self.instance.disks)
3604 53c776b5 Iustin Pop
      min_percent = 100
3605 53c776b5 Iustin Pop
      for node, nres in result.items():
3606 53c776b5 Iustin Pop
        msg = nres.RemoteFailMsg()
3607 53c776b5 Iustin Pop
        if msg:
3608 53c776b5 Iustin Pop
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
3609 53c776b5 Iustin Pop
                                   (node, msg))
3610 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
3611 53c776b5 Iustin Pop
        all_done = all_done and node_done
3612 53c776b5 Iustin Pop
        if node_percent is not None:
3613 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
3614 53c776b5 Iustin Pop
      if not all_done:
3615 53c776b5 Iustin Pop
        if min_percent < 100:
3616 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
3617 53c776b5 Iustin Pop
        time.sleep(2)
3618 53c776b5 Iustin Pop
3619 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
3620 53c776b5 Iustin Pop
    """Demote a node to secondary.
3621 53c776b5 Iustin Pop

3622 53c776b5 Iustin Pop
    """
3623 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
3624 53c776b5 Iustin Pop
3625 53c776b5 Iustin Pop
    for dev in self.instance.disks:
3626 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
3627 53c776b5 Iustin Pop
3628 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
3629 53c776b5 Iustin Pop
                                          self.instance.disks)
3630 53c776b5 Iustin Pop
    msg = result.RemoteFailMsg()
3631 53c776b5 Iustin Pop
    if msg:
3632 53c776b5 Iustin Pop
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
3633 53c776b5 Iustin Pop
                               " error %s" % (node, msg))
3634 53c776b5 Iustin Pop
3635 53c776b5 Iustin Pop
  def _GoStandalone(self):
3636 53c776b5 Iustin Pop
    """Disconnect from the network.
3637 53c776b5 Iustin Pop

3638 53c776b5 Iustin Pop
    """
3639 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
3640 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
3641 53c776b5 Iustin Pop
                                               self.instance.disks)
3642 53c776b5 Iustin Pop
    for node, nres in result.items():
3643 53c776b5 Iustin Pop
      msg = nres.RemoteFailMsg()
3644 53c776b5 Iustin Pop
      if msg:
3645 53c776b5 Iustin Pop
        raise errors.OpExecError("Cannot disconnect disks node %s,"
3646 53c776b5 Iustin Pop
                                 " error %s" % (node, msg))
3647 53c776b5 Iustin Pop
3648 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
    """Reconnect the instance's disks to the network.

    @type multimaster: boolean
    @param multimaster: whether to attach the disks in dual-master
        (True) or single-master (False) mode

    """
    if multimaster:
      mode_text = "dual-master"
    else:
      mode_text = "single-master"
    self.feedback_fn("* changing disks into %s mode" % mode_text)
    rpc_result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks,
                                               self.instance.name, multimaster)
    for node_name, node_result in rpc_result.items():
      fail_msg = node_result.RemoteFailMsg()
      if not fail_msg:
        continue
      raise errors.OpExecError("Cannot change disks config on node %s,"
                               " error: %s" % (node_name, fail_msg))
3665 53c776b5 Iustin Pop
3666 53c776b5 Iustin Pop
  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    @raise errors.OpExecError: if the instance is found running on both
        nodes, on neither node, or if any disk reconfiguration step
        (except the first sync, which is tolerated) fails

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    # the node NOT running the instance gets its disks demoted to
    # secondary before the disconnect/reconnect cycle below
    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore here errors, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")
3732 53c776b5 Iustin Pop
3733 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    Demotes the target node's disks back to secondary and reconnects
    the pair in single-master mode.  Since this already runs on an
    error path, failures are only logged as a warning instead of
    being re-raised.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      # best-effort recovery: report the problem and leave the rest to
      # the administrator
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))
3748 6906a9d8 Guido Trotter
3749 6906a9d8 Guido Trotter
  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    Tells the target node to finalize the migration with success=False
    (i.e. abort it).  A failure of this RPC is only logged, never
    raised, because the caller must still attempt to revert the disk
    status afterwards.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      # use lazy %-args (consistent with the other logging calls in
      # this file) instead of eager string interpolation
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.
3767 6906a9d8 Guido Trotter
3768 53c776b5 Iustin Pop
  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    @raise errors.OpExecError: if the disks are degraded, or if any of
        the pre-migration, migration or finalization RPCs fail

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # refuse to migrate onto a degraded/out-of-sync target
    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      # pre-migration failed: abort on the target and put the disks
      # back into single-master mode before giving up
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    # NOTE(review): the purpose of the 10s sleeps around the migration
    # RPC is not evident from this code -- presumably a settle delay;
    # confirm before changing
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # tell the target node the migration is final (success=True)
    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    # demote the old primary and return the disks to single-master mode
    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")
3860 53c776b5 Iustin Pop
3861 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
    """Perform the migration.

    Caches the source/target node names and their secondary IPs on the
    LU, then dispatches either to the cleanup path or to the actual
    migration path depending on the opcode's cleanup flag.

    """
    self.feedback_fn = feedback_fn

    source = self.instance.primary_node
    target = self.instance.secondary_nodes[0]
    self.source_node = source
    self.target_node = target
    self.all_nodes = [source, target]

    nodes_ip = {}
    for name in (source, target):
      nodes_ip[name] = self.cfg.GetNodeInfo(name).secondary_ip
    self.nodes_ip = nodes_ip

    if self.op.cleanup:
      return self._ExecCleanup()
    return self._ExecMigration()
3878 53c776b5 Iustin Pop
3879 53c776b5 Iustin Pop
3880 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
3881 428958aa Iustin Pop
                    info, force_open):
3882 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
3883 a8083063 Iustin Pop

3884 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
3885 a8083063 Iustin Pop
  all its children.
3886 a8083063 Iustin Pop

3887 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
3888 a8083063 Iustin Pop

3889 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
3890 428958aa Iustin Pop
  @param node: the node on which to create the device
3891 428958aa Iustin Pop
  @type instance: L{objects.Instance}
3892 428958aa Iustin Pop
  @param instance: the instance which owns the device
3893 428958aa Iustin Pop
  @type device: L{objects.Disk}
3894 428958aa Iustin Pop
  @param device: the device to create
3895 428958aa Iustin Pop
  @type force_create: boolean
3896 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
3897 428958aa Iustin Pop
      will be change to True whenever we find a device which has
3898 428958aa Iustin Pop
      CreateOnSecondary() attribute
3899 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
3900 428958aa Iustin Pop
      (this will be represented as a LVM tag)
3901 428958aa Iustin Pop
  @type force_open: boolean
3902 428958aa Iustin Pop
  @param force_open: this parameter will be passes to the
3903 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
3904 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
3905 428958aa Iustin Pop
      the child assembly and the device own Open() execution
3906 428958aa Iustin Pop

3907 a8083063 Iustin Pop
  """
3908 a8083063 Iustin Pop
  if device.CreateOnSecondary():
3909 428958aa Iustin Pop
    force_create = True
3910 796cab27 Iustin Pop
3911 a8083063 Iustin Pop
  if device.children:
3912 a8083063 Iustin Pop
    for child in device.children:
3913 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
3914 428958aa Iustin Pop
                      info, force_open)
3915 a8083063 Iustin Pop
3916 428958aa Iustin Pop
  if not force_create:
3917 796cab27 Iustin Pop
    return
3918 796cab27 Iustin Pop
3919 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
3920 de12473a Iustin Pop
3921 de12473a Iustin Pop
3922 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
3923 de12473a Iustin Pop
  """Create a single block device on a given node.
3924 de12473a Iustin Pop

3925 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
3926 de12473a Iustin Pop
  created in advance.
3927 de12473a Iustin Pop

3928 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
3929 de12473a Iustin Pop
  @param node: the node on which to create the device
3930 de12473a Iustin Pop
  @type instance: L{objects.Instance}
3931 de12473a Iustin Pop
  @param instance: the instance which owns the device
3932 de12473a Iustin Pop
  @type device: L{objects.Disk}
3933 de12473a Iustin Pop
  @param device: the device to create
3934 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
3935 de12473a Iustin Pop
      (this will be represented as a LVM tag)
3936 de12473a Iustin Pop
  @type force_open: boolean
3937 de12473a Iustin Pop
  @param force_open: this parameter will be passes to the
3938 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
3939 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
3940 de12473a Iustin Pop
      the child assembly and the device own Open() execution
3941 de12473a Iustin Pop

3942 de12473a Iustin Pop
  """
3943 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3944 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
3945 428958aa Iustin Pop
                                       instance.name, force_open, info)
3946 7d81697f Iustin Pop
  msg = result.RemoteFailMsg()
3947 7d81697f Iustin Pop
  if msg:
3948 428958aa Iustin Pop
    raise errors.OpExecError("Can't create block device %s on"
3949 7d81697f Iustin Pop
                             " node %s for instance %s: %s" %
3950 7d81697f Iustin Pop
                             (device, node, instance.name, msg))
3951 a8083063 Iustin Pop
  if device.physical_id is None:
3952 0959c824 Iustin Pop
    device.physical_id = result.payload
3953 a8083063 Iustin Pop
3954 a8083063 Iustin Pop
3955 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
3956 923b1523 Iustin Pop
  """Generate a suitable LV name.
3957 923b1523 Iustin Pop

3958 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
3959 923b1523 Iustin Pop

3960 923b1523 Iustin Pop
  """
3961 923b1523 Iustin Pop
  results = []
3962 923b1523 Iustin Pop
  for val in exts:
3963 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
3964 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
3965 923b1523 Iustin Pop
  return results
3966 923b1523 Iustin Pop
3967 923b1523 Iustin Pop
3968 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  Builds the two backing LVs (data and a fixed-size 128MB metadata
  volume) and wraps them in a DRBD8 disk object connecting the two
  given nodes on the given minors.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port,
                                  p_minor, s_minor,
                                  shared_secret),
                      children=[data_lv, meta_lv],
                      iv_name=iv_name)
3987 a1f445d3 Iustin Pop
3988 7c0d6283 Michael Hanselmann
3989 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  @param lu: the lu on whose behalf we execute
  @param template_name: one of the constants.DT_* disk templates
  @param instance_name: the name of the instance owning the disks
  @param primary_node: the instance's primary node
  @param secondary_nodes: list of secondary nodes (must be empty for
      plain/file templates, exactly one node for drbd8)
  @param disk_info: list of dicts with at least "size" and "mode" keys
  @param file_storage_dir: base directory for file-based disks
  @param file_driver: driver for file-based disks
  @param base_index: offset added to each disk index when building the
      "disk/N" iv_name
  @return: list of L{objects.Disk} objects
  @raise errors.ProgrammerError: on a template/secondary-nodes mismatch
      or an unknown template name

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    # no disks at all
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # two minors per disk: one on the primary, one on the secondary
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    # each disk needs a data and a meta LV name
    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
4053 a8083063 Iustin Pop
4054 a8083063 Iustin Pop
4055 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
4056 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
4057 3ecf6786 Iustin Pop

4058 3ecf6786 Iustin Pop
  """
4059 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
4060 a0c3fea1 Michael Hanselmann
4061 a0c3fea1 Michael Hanselmann
4062 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @raise errors.OpExecError: if the file storage directory cannot be
      created, or (from L{_CreateBlockDev}) if a block device creation
      fails

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  # file-based disks need their containing directory created first
  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    if result.failed or not result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % pnode)

    if not result.data[0]:
      raise errors.OpExecError("Failed to create directory '%s'" %
                               file_storage_dir)

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in instance.all_nodes:
      # only the primary node gets force_create/force_open
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
4098 a8083063 Iustin Pop
4099 a8083063 Iustin Pop
4100 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
      if msg:
        # best-effort: warn and keep removing the remaining devices
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", device.iv_name, node, msg)
        all_result = False

  # file-based disks also leave a directory behind on the primary node
  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                 file_storage_dir)
    if result.failed or not result.data:
      logging.error("Could not remove directory '%s'", file_storage_dir)
      all_result = False

  return all_result
4137 a8083063 Iustin Pop
4138 a8083063 Iustin Pop
4139 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  # required free space in the volume group per disk template; the
  # file and diskless templates need no VG space at all (None)
  template_size = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
  }

  if disk_template not in template_size:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" %  disk_template)

  return template_size[disk_template]
4157 e2fe6369 Iustin Pop
4158 e2fe6369 Iustin Pop
4159 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation that is
  shared between instance creation and instance modification: the
  candidate parameters are sent to every involved node and each node's
  verdict is checked.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvparams)
  for node in nodenames:
    node_result = hvinfo[node]
    if node_result.offline:
      # offline nodes cannot validate anything; they are skipped rather
      # than treated as failures
      continue
    fail_msg = node_result.RemoteFailMsg()
    if fail_msg:
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
                                 " %s" % fail_msg)
4187 74409b12 Iustin Pop
4188 74409b12 Iustin Pop
4189 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Handles both fresh creation (C{INSTANCE_CREATE}) and import from an
  existing export (C{INSTANCE_IMPORT}), selected via the "mode" opcode
  parameter.

  """
  # hooks directory name and hook object type for this LU
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode parameters that must be present; optional ones (pnode, snode,
  # iallocator, hypervisor, src_node, src_path, os_type) are defaulted
  # in ExpandNames/CheckPrereq
  _OP_REQP = ["instance_name", "disks", "disk_template",
              "mode", "start",
              "wait_for_sync", "ip_check", "nics",
              "hvparams", "beparams"]
  # fine-grained locking: this LU does not take the big ganeti lock
  REQ_BGL = False
4200 7baf741d Guido Trotter
4201 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Expand a (possibly abbreviated) node name and verify it exists.

    @param node: the node name to expand
    @return: the fully-expanded node name
    @raise errors.OpPrereqError: if the node is not known to the
        configuration

    """
    expanded_name = self.cfg.ExpandNodeName(node)
    if expanded_name is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return expanded_name
4209 7baf741d Guido Trotter
4210 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation: validates the
    cheap (local) opcode parameters, pre-builds the NIC and disk
    definition lists, and declares the instance and node locks needed.

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # default to the cluster-wide hypervisor when none was requested
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    # merge the requested hvparams on top of the cluster defaults before
    # running the hypervisor-specific syntax check
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)

    # fill and remember the beparams dict
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification (also resolves the name to an IP)
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup: turn the raw nic dicts from the opcode into NIC objects
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks; "none" (string) and None both mean no IP,
      # "auto" means reuse the IP the instance name resolved to
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification; "auto"/"generate" are resolved later,
      # in CheckPrereq
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
      # bridge verification
      bridge = nic.get("bridge", None)
      if bridge is None:
        bridge = self.cfg.GetDefBridge()
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build: normalize each disk spec to {"size", "mode"}
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    # exactly one of iallocator/pnode must be given
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator may pick any node, so we must lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        # without a source node the export must be searched for on all
        # nodes, hence the ALL_SET lock; CheckPrereq does the search
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          # relative source paths are anchored at the cluster export dir
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
4366 a8083063 Iustin Pop
4367 538475ca Iustin Pop
  def _RunAllocator(self):
    """Compute the instance placement by running the iallocator.

    On success this sets self.op.pnode (and self.op.snode when the
    allocator was asked for two nodes) from the allocator's answer.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    nic_dicts = [nic.ToDict() for nic in self.nics]
    allocator = IAllocator(self,
                           mode=constants.IALLOCATOR_MODE_ALLOC,
                           name=self.op.instance_name,
                           disk_template=self.op.disk_template,
                           tags=[],
                           os=self.op.os_type,
                           vcpus=self.be_full[constants.BE_VCPUS],
                           mem_size=self.be_full[constants.BE_MEMORY],
                           disks=self.disks,
                           nics=nic_dicts,
                           hypervisor=self.op.hypervisor,
                           )

    allocator.Run(self.op.iallocator)

    if not allocator.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           allocator.info))
    if len(allocator.nodes) != allocator.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(allocator.nodes),
                                  allocator.required_nodes))
    self.op.pnode = allocator.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(allocator.nodes))
    # a second node is only returned (and needed) for mirrored templates
    if allocator.required_nodes == 2:
      self.op.snode = allocator.nodes[1]
4402 538475ca Iustin Pop
4403 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    @return: a (env, pre-hook nodes, post-hook nodes) tuple; the hooks
        run on the master, the primary node and any secondaries

    """
    env = {}
    env["INSTANCE_DISK_TEMPLATE"] = self.op.disk_template
    env["INSTANCE_DISK_SIZE"] = ",".join(str(d["size"]) for d in self.disks)
    env["INSTANCE_ADD_MODE"] = self.op.mode
    # import mode carries extra information about the data source
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGES"] = self.src_images

    nic_specs = [(nic.ip, nic.bridge, nic.mac) for nic in self.nics]
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
                                     primary_node=self.op.pnode,
                                     secondary_nodes=self.secondaries,
                                     status=self.op.start,
                                     os_type=self.op.os_type,
                                     memory=self.be_full[constants.BE_MEMORY],
                                     vcpus=self.be_full[constants.BE_VCPUS],
                                     nics=nic_specs,
                                     ))

    node_list = ([self.cfg.GetMasterNode(), self.op.pnode] +
                 self.secondaries)
    return env, node_list, node_list
4432 a8083063 Iustin Pop
4433 a8083063 Iustin Pop
4434 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Verifies everything that needs the node locks: locates and validates
    the export (import mode), checks for IP conflicts, generates the
    final MAC addresses, runs the iallocator if requested, and validates
    the chosen nodes (online/drained state, disk space, hypervisor
    parameters, OS availability, bridges and free memory).

    @raise errors.OpPrereqError: if any prerequisite is not met

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_node is None:
        # no source node given: search all locked nodes for an export
        # matching the (relative) source path
        exp_list = self.rpc.call_export_list(
          self.acquired_locks[locking.LEVEL_NODE])
        found = False
        for node in exp_list:
          if not exp_list[node].failed and src_path in exp_list[node].data:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                      src_path)

      _CheckNodeOnline(self, src_node)
      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise()
      if not result.data:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      # the export info is a ConfigParser-style object (has_section/get)
      export_info = result.data
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      # the OS always comes from the export, overriding any opcode value
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      # collect one dump image per exported disk; False marks a disk for
      # which the export has no image
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      # when re-importing under the same name, reuse the exported MACs
      # for any nic left on "auto"
      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          # NOTE(review): ">= idx" looks off-by-one (idx is 0-based, so
          # idx == exp_nic_count has no nic%d_mac option and the .get
          # below would raise NoOptionError) — confirm intended bound
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # a reachable IP means some other host already uses it
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name)

    self.secondaries = []

    # mirror node verification (only network-mirrored templates need a
    # secondary node)
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements (req_size is None for non-LVM templates)
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise()
        info = info.data
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise()
    if not isinstance(result.data, objects.OS):
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
                                 " exist on destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node (only when the instance will be
    # started right away)
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)
4612 49ce1563 Iustin Pop
4613 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4614 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
4615 a8083063 Iustin Pop

4616 a8083063 Iustin Pop
    """
4617 a8083063 Iustin Pop
    instance = self.op.instance_name
4618 a8083063 Iustin Pop
    pnode_name = self.pnode.name
4619 a8083063 Iustin Pop
4620 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
4621 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
4622 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
4623 2a6469d5 Alexander Schreiber
    else:
4624 2a6469d5 Alexander Schreiber
      network_port = None
4625 58acb49d Alexander Schreiber
4626 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
4627 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
4628 31a853d2 Iustin Pop
4629 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
4630 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
4631 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
4632 2c313123 Manuel Franceschini
    else:
4633 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
4634 2c313123 Manuel Franceschini
4635 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
4636 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
4637 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
4638 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
4639 0f1a06e3 Manuel Franceschini
4640 0f1a06e3 Manuel Franceschini
4641 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
4642 a8083063 Iustin Pop
                                  self.op.disk_template,
4643 a8083063 Iustin Pop
                                  instance, pnode_name,
4644 08db7c5c Iustin Pop
                                  self.secondaries,
4645 08db7c5c Iustin Pop
                                  self.disks,
4646 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
4647 e2a65344 Iustin Pop
                                  self.op.file_driver,
4648 e2a65344 Iustin Pop
                                  0)
4649 a8083063 Iustin Pop
4650 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
4651 a8083063 Iustin Pop
                            primary_node=pnode_name,
4652 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
4653 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
4654 4978db17 Iustin Pop
                            admin_up=False,
4655 58acb49d Alexander Schreiber
                            network_port=network_port,
4656 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
4657 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
4658 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
4659 a8083063 Iustin Pop
                            )
4660 a8083063 Iustin Pop
4661 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
4662 796cab27 Iustin Pop
    try:
4663 796cab27 Iustin Pop
      _CreateDisks(self, iobj)
4664 796cab27 Iustin Pop
    except errors.OpExecError:
4665 796cab27 Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
4666 796cab27 Iustin Pop
      try:
4667 796cab27 Iustin Pop
        _RemoveDisks(self, iobj)
4668 796cab27 Iustin Pop
      finally:
4669 796cab27 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance)
4670 796cab27 Iustin Pop
        raise
4671 a8083063 Iustin Pop
4672 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
4673 a8083063 Iustin Pop
4674 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
4675 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
4676 7baf741d Guido Trotter
    # added the instance to the config
4677 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
4678 e36e96b4 Guido Trotter
    # Unlock all the nodes
4679 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
4680 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
4681 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
4682 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
4683 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
4684 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
4685 9c8971d7 Guido Trotter
    else:
4686 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
4687 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
4688 a8083063 Iustin Pop
4689 a8083063 Iustin Pop
    if self.op.wait_for_sync:
4690 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
4691 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
4692 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
4693 a8083063 Iustin Pop
      time.sleep(15)
4694 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
4695 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
4696 a8083063 Iustin Pop
    else:
4697 a8083063 Iustin Pop
      disk_abort = False
4698 a8083063 Iustin Pop
4699 a8083063 Iustin Pop
    if disk_abort:
4700 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
4701 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
4702 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
4703 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
4704 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
4705 3ecf6786 Iustin Pop
                               " this instance")
4706 a8083063 Iustin Pop
4707 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
4708 a8083063 Iustin Pop
                (instance, pnode_name))
4709 a8083063 Iustin Pop
4710 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
4711 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
4712 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
4713 781de953 Iustin Pop
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
4714 20e01edd Iustin Pop
        msg = result.RemoteFailMsg()
4715 20e01edd Iustin Pop
        if msg:
4716 781de953 Iustin Pop
          raise errors.OpExecError("Could not add os for instance %s"
4717 20e01edd Iustin Pop
                                   " on node %s: %s" %
4718 20e01edd Iustin Pop
                                   (instance, pnode_name, msg))
4719 a8083063 Iustin Pop
4720 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
4721 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
4722 a8083063 Iustin Pop
        src_node = self.op.src_node
4723 09acf207 Guido Trotter
        src_images = self.src_images
4724 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
4725 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
4726 09acf207 Guido Trotter
                                                         src_node, src_images,
4727 6c0af70e Guido Trotter
                                                         cluster_name)
4728 781de953 Iustin Pop
        import_result.Raise()
4729 781de953 Iustin Pop
        for idx, result in enumerate(import_result.data):
4730 09acf207 Guido Trotter
          if not result:
4731 726d7d68 Iustin Pop
            self.LogWarning("Could not import the image %s for instance"
4732 726d7d68 Iustin Pop
                            " %s, disk %d, on node %s" %
4733 726d7d68 Iustin Pop
                            (src_images[idx], instance, idx, pnode_name))
4734 a8083063 Iustin Pop
      else:
4735 a8083063 Iustin Pop
        # also checked in the prereq part
4736 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
4737 3ecf6786 Iustin Pop
                                     % self.op.mode)
4738 a8083063 Iustin Pop
4739 a8083063 Iustin Pop
    if self.op.start:
4740 4978db17 Iustin Pop
      iobj.admin_up = True
4741 4978db17 Iustin Pop
      self.cfg.Update(iobj)
4742 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
4743 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
4744 781de953 Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None)
4745 dd279568 Iustin Pop
      msg = result.RemoteFailMsg()
4746 dd279568 Iustin Pop
      if msg:
4747 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance: %s" % msg)
4748 a8083063 Iustin Pop
4749 a8083063 Iustin Pop
4750 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  # required opcode parameters
  _OP_REQP = ["instance_name"]
  # no big ganeti lock needed; we lock only the target instance
  REQ_BGL = False

  def ExpandNames(self):
    # acquire only the lock on the instance named in the opcode
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    # the console command must run on the primary node, so it has to be up
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    Returns the shell command (built via ssh) that, when run on the
    master node, attaches to the instance's console on its primary node.

    """
    instance = self.instance
    node = instance.primary_node

    # ask the primary node which instances it is currently running
    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    node_insts.Raise()

    if instance.name not in node_insts.data:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    cluster = self.cfg.GetClusterInfo()
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    hvparams = cluster.FillHV(instance)
    beparams = cluster.FillBE(instance)
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)

    # build ssh cmdline
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
4801 a8083063 Iustin Pop
4802 a8083063 Iustin Pop
4803 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  # hooks subdirectory and object type for the hooks environment
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters
  _OP_REQP = ["instance_name", "mode", "disks"]
  # no big ganeti lock; node locks are computed in ExpandNames/DeclareLocks
  REQ_BGL = False
4811 efd990e4 Guido Trotter
4812 7e9366f7 Iustin Pop
  def CheckArguments(self):
    """Normalize optional opcode fields and validate their combination.

    Ensures that ``remote_node`` and ``iallocator`` exist on the opcode
    (defaulting to None), then enforces that exactly one of them is set
    when changing the secondary node, and neither otherwise.

    """
    # fill in defaults for the optional parameters when absent
    for attr in ("remote_node", "iallocator"):
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # count how many of the two mutually-exclusive options are unset
    unset_count = [self.op.remote_node, self.op.iallocator].count(None)
    if self.op.mode == constants.REPLACE_DISK_CHG:
      if unset_count == 2:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given")
      if unset_count == 0:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
    else: # not replacing the secondary
      if unset_count != 2:
        raise errors.OpPrereqError("The iallocator and new node options can"
                                   " be used only when changing the"
                                   " secondary node")
4834 7e9366f7 Iustin Pop
  def ExpandNames(self):
    """Compute the set of locks needed by this LU.

    Locks the instance itself, then decides the node-lock strategy:
    all nodes when an iallocator will pick the new secondary, only the
    given remote node when one was named, or the instance's own nodes
    (added later via DeclareLocks) otherwise.

    """
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      # the allocator may pick any node, so we must lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      # store the fully-expanded node name back on the opcode
      self.op.remote_node = remote_node
      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      # lock only the instance's primary/secondary (filled in DeclareLocks)
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4855 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
4856 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
4857 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
4858 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
4859 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
4860 efd990e4 Guido Trotter
      self._LockInstancesNodes()
4861 a8083063 Iustin Pop
4862 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    Runs the configured iallocator script in relocation mode and stores
    the chosen node in ``self.op.remote_node``.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # BUGFIX: the format string has three "%s" placeholders but the
      # original code passed only two arguments, so hitting this error
      # path raised TypeError instead of OpPrereqError; pass the
      # allocator name as the first argument as the message intends
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)
4885 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    # replace-disks specific variables first, then the generic
    # per-instance environment on top
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    # hooks run on the master and the primary, plus the new secondary
    # when one was requested
    node_list = [self.cfg.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return env, node_list, node_list
4904 a8083063 Iustin Pop
4905 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, is DRBD8-based,
    resolves the replacement target/other/new nodes depending on the
    replace mode, and validates the requested disk indices.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # run the allocator first, so the node checks below see its choice
    if self.op.iallocator is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.")

    # n1/n2 are the nodes that must be online for the operation;
    # tgt_node gets the new storage, oth_node is the untouched peer
    if self.op.mode == constants.REPLACE_DISK_PRI:
      n1 = self.tgt_node = instance.primary_node
      n2 = self.oth_node = self.sec_node
    elif self.op.mode == constants.REPLACE_DISK_SEC:
      n1 = self.tgt_node = self.sec_node
      n2 = self.oth_node = instance.primary_node
    elif self.op.mode == constants.REPLACE_DISK_CHG:
      n1 = self.new_node = remote_node
      n2 = self.oth_node = instance.primary_node
      self.tgt_node = self.sec_node
      # the new secondary must be able to receive data
      _CheckNodeNotDrained(self, remote_node)
    else:
      raise errors.ProgrammerError("Unhandled disk replace mode")

    _CheckNodeOnline(self, n1)
    _CheckNodeOnline(self, n2)

    # an empty disk list means "replace all disks"
    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    # FindDisk raises OpPrereqError for invalid indices
    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)
4966 a8083063 Iustin Pop
4967 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    @param feedback_fn: job feedback function (unused here; progress is
        reported via self.proc.LogStep/LogInfo/LogWarning instead)

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps disk iv_name -> (drbd device, old child LVs, new child LVs)
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    # every disk to be replaced must be visible on both nodes
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking disk/%d on %s" % (idx, node))
        cfg.SetDiskID(dev, node)
        result = self.rpc.call_blockdev_find(node, dev)
        msg = result.RemoteFailMsg()
        if not msg and not result.payload:
          msg = "disk not found"
        if msg:
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, oth_node))
      # the untouched peer must hold consistent data, since it will be
      # the only good copy while we rebuild the target node's storage
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      # one data LV and one (128M) meta LV per replaced disk
      lv_names = [".disk%d_%s" % (idx, suf)
                  for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if not result.RemoteFailMsg() and result.payload:
          # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # update the in-memory disk objects to reflect the renames
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
      if result.failed or not result.data:
        # best-effort rollback: remove the LVs we just created
        for new_lv in new_lvs:
          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
          if msg:
            warning("Can't rollback device %s: %s", dev, msg,
                    hint="cleanup manually the unused logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))
      # NOTE(review): payload[5] appears to be the is_degraded flag of
      # the blockdev_find result -- confirm against the rpc definition
      if result.payload[5]:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # removal failures are non-fatal; the admin can clean up manually
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
        if msg:
          warning("Can't remove old LV: %s" % msg,
                  hint="manually remove unused LVs")
          continue
5158 a9e0c397 Iustin Pop
5159 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    Note: feedback_fn is part of the common Exec-helper signature but is
    not used by this method; progress is reported via self.proc instead.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node
    # secondary IPs of all three nodes, used as the drbd network
    # endpoints in the disconnect/attach RPCs below
    nodes_ip = {
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
      }

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([pri_node, new_node])
    for node in pri_node, new_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d on %s" % (idx, pri_node))
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        # the RPC succeeded but found no device: treat as an error too
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                 (idx, pri_node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, pri_node))
      # ldisk=True: the primary's *local* disk must be consistent, since
      # the old secondary's copy is about to be thrown away
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      info("adding new local storage on %s for disk/%d" %
           (new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self, new_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step 4: dbrd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
                                   instance.name)
    logging.debug("Allocated minors %s" % (minors,))
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
      size = dev.size  # NOTE(review): 'size' is not used below
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the latter activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if pri_node == o_node1:
        p_minor = o_minor1
      else:
        p_minor = o_minor2

      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)

      # remember (device, old LVs, networked id) for steps 5 and 6
      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children)
      try:
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
                              _GetInstanceInfoText(instance), False)
      except errors.BlockDeviceError:
        # roll back the drbd minor allocation before re-raising
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    for idx, dev in enumerate(instance.disks):
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for disk/%d on old node" % idx)
      cfg.SetDiskID(dev, old_node)
      # best-effort: a failed shutdown only warns, it does not abort
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
      if msg:
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
                (idx, msg),
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
                                               instance.disks)[pri_node]

    msg = result.RemoteFailMsg()
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
                                           instance.disks, instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.RemoteFailMsg()
      if msg:
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
                hint="please do a gnt-instance info to see the"
                " status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
                                 (idx, msg))
      # payload[5] is used as the degraded flag of the device status
      # -- TODO confirm against the blockdev_find RPC definition
      if result.payload[5]:
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      info("remove logical volumes for disk/%d" % idx)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
        if msg:
          warning("Can't remove LV on old secondary: %s", msg,
                  hint="Cleanup stale volumes by hand")
5343 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    inst = self.instance

    # Activate the instance disks if we're replacing them on a down instance
    disks_were_down = not inst.admin_up
    if disks_were_down:
      _StartInstanceDisks(self, inst, True)

    if self.op.mode == constants.REPLACE_DISK_CHG:
      handler = self._ExecD8Secondary
    else:
      handler = self._ExecD8DiskOnly

    result = handler(feedback_fn)

    # Deactivate the instance disks if we're replacing them on a down instance
    if disks_were_down:
      _SafeShutdownInstanceDisks(self, inst)

    return result
5367 a9e0c397 Iustin Pop
5368 a8083063 Iustin Pop
5369 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  Only the plain (lvm) and drbd8 disk templates support growing; the
  grow RPC is sent to every node holding the disk and only afterwards
  is the new size recorded in the configuration.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    Reject non-integer and non-positive grow amounts early, before any
    locks are taken: a negative amount would otherwise slip past the
    free-space check in CheckPrereq (amount > vg_free is trivially
    false) and reach the grow RPC and RecordGrow.

    """
    if not isinstance(self.op.amount, (int, long)) or self.op.amount <= 0:
      raise errors.OpPrereqError("Invalid grow amount '%s' (must be a"
                                 " positive integer)" % (self.op.amount,))

  def ExpandNames(self):
    """Expand and lock the instance; node locks are computed later."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the instance's nodes once the instance lock is held."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    template supports growing, and that every node holding the disk is
    online and has enough free space in the volume group.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    # raises OpPrereqError for an invalid disk index
    self.disk = instance.FindDisk(self.op.disk)

    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo[node]
      if info.failed or not info.data:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.data.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    The grow request is sent to all nodes holding the disk; only after
    all of them succeed is the new size recorded in the configuration.

    """
    instance = self.instance
    disk = self.disk
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Grow request failed to node %s: %s" %
                                 (node, msg))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
5462 8729e0d7 Iustin Pop
5463 8729e0d7 Iustin Pop
5464 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  If self.op.static is False the data includes live information
  gathered via RPC from the nodes (running state, per-node block
  device status); otherwise only configuration data is returned.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    # all locks are taken in shared mode: this LU only reads state
    self.needed_locks = {}
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      # explicit list: expand each name and lock just those instances
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # empty list means "all instances"; the actual names are picked
      # up from the acquired locks in CheckPrereq
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # node locks are derived from the instance locks taken above
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" case: the lock names are the instance names
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Builds a dict describing the device ``dev``; recurses into the
    device's children.  In non-static mode the primary (and, where
    applicable, secondary) device status is queried via RPC; in static
    mode the pstatus/sstatus entries are None.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
      msg = dev_pstatus.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                 (instance.name, msg))
      dev_pstatus = dev_pstatus.payload
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
      msg = dev_sstatus.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                 (instance.name, msg))
      dev_sstatus = dev_sstatus.payload
    else:
      dev_sstatus = None

    if dev.children:
      # recurse with the (possibly updated) secondary node
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data.

    Returns a dict mapping instance name to a dict of configuration
    (and, in non-static mode, runtime) details for each wanted
    instance.

    """
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        # live query of the running state on the primary node
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise()
        remote_info = remote_info.data
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        # hv/be "instance" entries hold the instance's own overrides,
        # the "actual" entries the values after filling in cluster
        # defaults
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
5610 a8083063 Iustin Pop
5611 a8083063 Iustin Pop
5612 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
5613 a8083063 Iustin Pop
  """Modifies an instances's parameters.
5614 a8083063 Iustin Pop

5615 a8083063 Iustin Pop
  """
5616 a8083063 Iustin Pop
  HPATH = "instance-modify"
5617 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5618 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
5619 1a5c7281 Guido Trotter
  REQ_BGL = False
5620 1a5c7281 Guido Trotter
5621 24991749 Iustin Pop
  def CheckArguments(self):
5622 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
5623 24991749 Iustin Pop
      self.op.nics = []
5624 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
5625 24991749 Iustin Pop
      self.op.disks = []
5626 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
5627 24991749 Iustin Pop
      self.op.beparams = {}
5628 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
5629 24991749 Iustin Pop
      self.op.hvparams = {}
5630 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
5631 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
5632 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
5633 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
5634 24991749 Iustin Pop
5635 24991749 Iustin Pop
    # Disk validation
5636 24991749 Iustin Pop
    disk_addremove = 0
5637 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5638 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5639 24991749 Iustin Pop
        disk_addremove += 1
5640 24991749 Iustin Pop
        continue
5641 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
5642 24991749 Iustin Pop
        disk_addremove += 1
5643 24991749 Iustin Pop
      else:
5644 24991749 Iustin Pop
        if not isinstance(disk_op, int):
5645 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
5646 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
5647 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
5648 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
5649 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
5650 24991749 Iustin Pop
        size = disk_dict.get('size', None)
5651 24991749 Iustin Pop
        if size is None:
5652 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
5653 24991749 Iustin Pop
        try:
5654 24991749 Iustin Pop
          size = int(size)
5655 24991749 Iustin Pop
        except ValueError, err:
5656 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
5657 24991749 Iustin Pop
                                     str(err))
5658 24991749 Iustin Pop
        disk_dict['size'] = size
5659 24991749 Iustin Pop
      else:
5660 24991749 Iustin Pop
        # modification of disk
5661 24991749 Iustin Pop
        if 'size' in disk_dict:
5662 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
5663 24991749 Iustin Pop
                                     " grow-disk")
5664 24991749 Iustin Pop
5665 24991749 Iustin Pop
    if disk_addremove > 1:
5666 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
5667 24991749 Iustin Pop
                                 " supported at a time")
5668 24991749 Iustin Pop
5669 24991749 Iustin Pop
    # NIC validation
5670 24991749 Iustin Pop
    nic_addremove = 0
5671 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5672 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5673 24991749 Iustin Pop
        nic_addremove += 1
5674 24991749 Iustin Pop
        continue
5675 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
5676 24991749 Iustin Pop
        nic_addremove += 1
5677 24991749 Iustin Pop
      else:
5678 24991749 Iustin Pop
        if not isinstance(nic_op, int):
5679 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
5680 24991749 Iustin Pop
5681 24991749 Iustin Pop
      # nic_dict should be a dict
5682 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
5683 24991749 Iustin Pop
      if nic_ip is not None:
5684 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
5685 24991749 Iustin Pop
          nic_dict['ip'] = None
5686 24991749 Iustin Pop
        else:
5687 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
5688 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
5689 5c44da6a Guido Trotter
5690 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
5691 5c44da6a Guido Trotter
        nic_bridge = nic_dict.get('bridge', None)
5692 5c44da6a Guido Trotter
        if nic_bridge is None:
5693 5c44da6a Guido Trotter
          nic_dict['bridge'] = self.cfg.GetDefBridge()
5694 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
5695 5c44da6a Guido Trotter
        if nic_mac is None:
5696 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
5697 5c44da6a Guido Trotter
5698 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
5699 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
5700 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5701 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
5702 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
5703 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
5704 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
5705 5c44da6a Guido Trotter
                                     " modifying an existing nic")
5706 5c44da6a Guido Trotter
5707 24991749 Iustin Pop
    if nic_addremove > 1:
5708 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
5709 24991749 Iustin Pop
                                 " supported at a time")
5710 24991749 Iustin Pop
5711 1a5c7281 Guido Trotter
  def ExpandNames(self):
5712 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
5713 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
5714 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5715 74409b12 Iustin Pop
5716 74409b12 Iustin Pop
  def DeclareLocks(self, level):
5717 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
5718 74409b12 Iustin Pop
      self._LockInstancesNodes()
5719 a8083063 Iustin Pop
5720 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5721 a8083063 Iustin Pop
    """Build hooks env.
5722 a8083063 Iustin Pop

5723 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
5724 a8083063 Iustin Pop

5725 a8083063 Iustin Pop
    """
5726 396e1b78 Michael Hanselmann
    args = dict()
5727 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
5728 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
5729 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
5730 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
5731 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
5732 d8dcf3c9 Guido Trotter
    # information at all.
5733 d8dcf3c9 Guido Trotter
    if self.op.nics:
5734 d8dcf3c9 Guido Trotter
      args['nics'] = []
5735 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
5736 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
5737 d8dcf3c9 Guido Trotter
        if idx in nic_override:
5738 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
5739 d8dcf3c9 Guido Trotter
        else:
5740 d8dcf3c9 Guido Trotter
          this_nic_override = {}
5741 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
5742 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
5743 d8dcf3c9 Guido Trotter
        else:
5744 d8dcf3c9 Guido Trotter
          ip = nic.ip
5745 d8dcf3c9 Guido Trotter
        if 'bridge' in this_nic_override:
5746 d8dcf3c9 Guido Trotter
          bridge = this_nic_override['bridge']
5747 d8dcf3c9 Guido Trotter
        else:
5748 d8dcf3c9 Guido Trotter
          bridge = nic.bridge
5749 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
5750 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
5751 d8dcf3c9 Guido Trotter
        else:
5752 d8dcf3c9 Guido Trotter
          mac = nic.mac
5753 d8dcf3c9 Guido Trotter
        args['nics'].append((ip, bridge, mac))
5754 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
5755 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
5756 d8dcf3c9 Guido Trotter
        bridge = nic_override[constants.DDM_ADD]['bridge']
5757 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
5758 d8dcf3c9 Guido Trotter
        args['nics'].append((ip, bridge, mac))
5759 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
5760 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
5761 d8dcf3c9 Guido Trotter
5762 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
5763 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5764 a8083063 Iustin Pop
    return env, nl, nl
5765 a8083063 Iustin Pop
5766 a8083063 Iustin Pop
  def CheckPrereq(self):
5767 a8083063 Iustin Pop
    """Check prerequisites.
5768 a8083063 Iustin Pop

5769 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
5770 a8083063 Iustin Pop

5771 a8083063 Iustin Pop
    """
5772 24991749 Iustin Pop
    force = self.force = self.op.force
5773 a8083063 Iustin Pop
5774 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
5775 31a853d2 Iustin Pop
5776 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5777 1a5c7281 Guido Trotter
    assert self.instance is not None, \
5778 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5779 6b12959c Iustin Pop
    pnode = instance.primary_node
5780 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
5781 74409b12 Iustin Pop
5782 338e51e8 Iustin Pop
    # hvparams processing
5783 74409b12 Iustin Pop
    if self.op.hvparams:
5784 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
5785 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
5786 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
5787 74409b12 Iustin Pop
          try:
5788 74409b12 Iustin Pop
            del i_hvdict[key]
5789 74409b12 Iustin Pop
          except KeyError:
5790 74409b12 Iustin Pop
            pass
5791 74409b12 Iustin Pop
        else:
5792 74409b12 Iustin Pop
          i_hvdict[key] = val
5793 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
5794 a5728081 Guido Trotter
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
5795 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
5796 74409b12 Iustin Pop
                                i_hvdict)
5797 74409b12 Iustin Pop
      # local check
5798 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
5799 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
5800 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
5801 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
5802 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
5803 338e51e8 Iustin Pop
    else:
5804 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
5805 338e51e8 Iustin Pop
5806 338e51e8 Iustin Pop
    # beparams processing
5807 338e51e8 Iustin Pop
    if self.op.beparams:
5808 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
5809 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
5810 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
5811 338e51e8 Iustin Pop
          try:
5812 338e51e8 Iustin Pop
            del i_bedict[key]
5813 338e51e8 Iustin Pop
          except KeyError:
5814 338e51e8 Iustin Pop
            pass
5815 338e51e8 Iustin Pop
        else:
5816 338e51e8 Iustin Pop
          i_bedict[key] = val
5817 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
5818 a5728081 Guido Trotter
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
5819 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
5820 338e51e8 Iustin Pop
                                i_bedict)
5821 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
5822 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
5823 338e51e8 Iustin Pop
    else:
5824 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
5825 74409b12 Iustin Pop
5826 cfefe007 Guido Trotter
    self.warn = []
5827 647a5d80 Iustin Pop
5828 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
5829 647a5d80 Iustin Pop
      mem_check_list = [pnode]
5830 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
5831 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
5832 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
5833 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
5834 72737a7f Iustin Pop
                                                  instance.hypervisor)
5835 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
5836 72737a7f Iustin Pop
                                         instance.hypervisor)
5837 781de953 Iustin Pop
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
5838 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
5839 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
5840 cfefe007 Guido Trotter
      else:
5841 781de953 Iustin Pop
        if not instance_info.failed and instance_info.data:
5842 781de953 Iustin Pop
          current_mem = instance_info.data['memory']
5843 cfefe007 Guido Trotter
        else:
5844 cfefe007 Guido Trotter
          # Assume instance not running
5845 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
5846 cfefe007 Guido Trotter
          # and we have no other way to check)
5847 cfefe007 Guido Trotter
          current_mem = 0
5848 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
5849 781de953 Iustin Pop
                    nodeinfo[pnode].data['memory_free'])
5850 cfefe007 Guido Trotter
        if miss_mem > 0:
5851 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
5852 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
5853 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
5854 cfefe007 Guido Trotter
5855 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
5856 ea33068f Iustin Pop
        for node, nres in nodeinfo.iteritems():
5857 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
5858 ea33068f Iustin Pop
            continue
5859 781de953 Iustin Pop
          if nres.failed or not isinstance(nres.data, dict):
5860 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
5861 781de953 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
5862 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
5863 647a5d80 Iustin Pop
                             " secondary node %s" % node)
5864 5bc84f33 Alexander Schreiber
5865 24991749 Iustin Pop
    # NIC processing
5866 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5867 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5868 24991749 Iustin Pop
        if not instance.nics:
5869 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
5870 24991749 Iustin Pop
        continue
5871 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
5872 24991749 Iustin Pop
        # an existing nic
5873 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
5874 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
5875 24991749 Iustin Pop
                                     " are 0 to %d" %
5876 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
5877 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
5878 5c44da6a Guido Trotter
        nic_bridge = nic_dict['bridge']
5879 5c44da6a Guido Trotter
        if nic_bridge is None:
5880 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic bridge to None')
5881 24991749 Iustin Pop
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
5882 24991749 Iustin Pop
          msg = ("Bridge '%s' doesn't exist on one of"
5883 24991749 Iustin Pop
                 " the instance nodes" % nic_bridge)
5884 24991749 Iustin Pop
          if self.force:
5885 24991749 Iustin Pop
            self.warn.append(msg)
5886 24991749 Iustin Pop
          else:
5887 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
5888 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
5889 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
5890 5c44da6a Guido Trotter
        if nic_mac is None:
5891 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic mac to None')
5892 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5893 5c44da6a Guido Trotter
          # otherwise generate the mac
5894 5c44da6a Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC()
5895 5c44da6a Guido Trotter
        else:
5896 5c44da6a Guido Trotter
          # or validate/reserve the current one
5897 5c44da6a Guido Trotter
          if self.cfg.IsMacInUse(nic_mac):
5898 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
5899 5c44da6a Guido Trotter
                                       " in cluster" % nic_mac)
5900 24991749 Iustin Pop
5901 24991749 Iustin Pop
    # DISK processing
5902 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
5903 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
5904 24991749 Iustin Pop
                                 " diskless instances")
5905 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5906 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5907 24991749 Iustin Pop
        if len(instance.disks) == 1:
5908 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
5909 24991749 Iustin Pop
                                     " an instance")
5910 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
5911 24991749 Iustin Pop
        ins_l = ins_l[pnode]
5912 4cfb9426 Iustin Pop
        if ins_l.failed or not isinstance(ins_l.data, list):
5913 24991749 Iustin Pop
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
5914 4cfb9426 Iustin Pop
        if instance.name in ins_l.data:
5915 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
5916 24991749 Iustin Pop
                                     " disks.")
5917 24991749 Iustin Pop
5918 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
5919 24991749 Iustin Pop
          len(instance.nics) >= constants.MAX_DISKS):
5920 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
5921 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
5922 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
5923 24991749 Iustin Pop
        # an existing disk
5924 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
5925 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
5926 24991749 Iustin Pop
                                     " are 0 to %d" %
5927 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
5928 24991749 Iustin Pop
5929 a8083063 Iustin Pop
    return
5930 a8083063 Iustin Pop
5931 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5932 a8083063 Iustin Pop
    """Modifies an instance.
5933 a8083063 Iustin Pop

5934 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
5935 24991749 Iustin Pop

5936 a8083063 Iustin Pop
    """
5937 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
5938 cfefe007 Guido Trotter
    # feedback_fn there.
5939 cfefe007 Guido Trotter
    for warn in self.warn:
5940 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
5941 cfefe007 Guido Trotter
5942 a8083063 Iustin Pop
    result = []
5943 a8083063 Iustin Pop
    instance = self.instance
5944 24991749 Iustin Pop
    # disk changes
5945 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5946 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5947 24991749 Iustin Pop
        # remove the last disk
5948 24991749 Iustin Pop
        device = instance.disks.pop()
5949 24991749 Iustin Pop
        device_idx = len(instance.disks)
5950 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
5951 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
5952 e1bc0878 Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
5953 e1bc0878 Iustin Pop
          if msg:
5954 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
5955 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
5956 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
5957 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
5958 24991749 Iustin Pop
        # add a new disk
5959 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
5960 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
5961 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
5962 24991749 Iustin Pop
        else:
5963 24991749 Iustin Pop
          file_driver = file_path = None
5964 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
5965 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
5966 24991749 Iustin Pop
                                         instance.disk_template,
5967 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
5968 24991749 Iustin Pop
                                         instance.secondary_nodes,
5969 24991749 Iustin Pop
                                         [disk_dict],
5970 24991749 Iustin Pop
                                         file_path,
5971 24991749 Iustin Pop
                                         file_driver,
5972 24991749 Iustin Pop
                                         disk_idx_base)[0]
5973 24991749 Iustin Pop
        instance.disks.append(new_disk)
5974 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
5975 24991749 Iustin Pop
5976 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
5977 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
5978 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
5979 24991749 Iustin Pop
        #HARDCODE
5980 428958aa Iustin Pop
        for node in instance.all_nodes:
5981 428958aa Iustin Pop
          f_create = node == instance.primary_node
5982 796cab27 Iustin Pop
          try:
5983 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
5984 428958aa Iustin Pop
                            f_create, info, f_create)
5985 1492cca7 Iustin Pop
          except errors.OpExecError, err:
5986 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
5987 428958aa Iustin Pop
                            " node %s: %s",
5988 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
5989 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
5990 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
5991 24991749 Iustin Pop
      else:
5992 24991749 Iustin Pop
        # change a given disk
5993 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
5994 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
5995 24991749 Iustin Pop
    # NIC changes
5996 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5997 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5998 24991749 Iustin Pop
        # remove the last nic
5999 24991749 Iustin Pop
        del instance.nics[-1]
6000 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
6001 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
6002 5c44da6a Guido Trotter
        # mac and bridge should be set, by now
6003 5c44da6a Guido Trotter
        mac = nic_dict['mac']
6004 5c44da6a Guido Trotter
        bridge = nic_dict['bridge']
6005 24991749 Iustin Pop
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
6006 5c44da6a Guido Trotter
                              bridge=bridge)
6007 24991749 Iustin Pop
        instance.nics.append(new_nic)
6008 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
6009 24991749 Iustin Pop
                       "add:mac=%s,ip=%s,bridge=%s" %
6010 24991749 Iustin Pop
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
6011 24991749 Iustin Pop
      else:
6012 24991749 Iustin Pop
        # change a given nic
6013 24991749 Iustin Pop
        for key in 'mac', 'ip', 'bridge':
6014 24991749 Iustin Pop
          if key in nic_dict:
6015 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
6016 24991749 Iustin Pop
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
6017 24991749 Iustin Pop
6018 24991749 Iustin Pop
    # hvparams changes
6019 74409b12 Iustin Pop
    if self.op.hvparams:
6020 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
6021 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
6022 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
6023 24991749 Iustin Pop
6024 24991749 Iustin Pop
    # beparams changes
6025 338e51e8 Iustin Pop
    if self.op.beparams:
6026 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
6027 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
6028 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
6029 a8083063 Iustin Pop
6030 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
6031 a8083063 Iustin Pop
6032 a8083063 Iustin Pop
    return result
6033 a8083063 Iustin Pop
6034 a8083063 Iustin Pop
6035 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    """Declare the (shared) node locks needed for the query."""
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      wanted = _GetWantedNodes(self, self.op.nodes)
    else:
      # no node list given: query every node in the cluster
      wanted = locking.ALL_SET
    self.needed_locks[locking.LEVEL_NODE] = wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    exports = self.rpc.call_export_list(self.nodes)
    by_node = {}
    for node_name, node_result in exports.items():
      if node_result.failed:
        # signal an unreachable/failed node with False instead of a list
        by_node[node_name] = False
      else:
        by_node[node_name] = node_result.data

    return by_node
6076 a8083063 Iustin Pop
6077 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know
    # where the previous export might be, and in this LU we search for it
    # and remove it from its current node. In the future we could fix this
    # by:
    #  - making a tasklet to search (share-lock all), then create the new
    #    one, then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)
    _CheckNodeNotDrained(self, self.dst_node.name)

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    The flow is: optionally shut the instance down, snapshot all its
    disks, restart the instance if it was running, export each snapshot
    to the target node, finalize the export there and finally remove any
    older export of the same instance found on other nodes.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, src_node, msg))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    try:
      for disk in instance.disks:
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if new_dev_name.failed or not new_dev_name.data:
          self.LogWarning("Could not snapshot block device %s on node %s",
                          disk.logical_id[1], src_node)
          # keep positional correspondence with instance.disks
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name.data),
                                 physical_id=(vgname, new_dev_name.data),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed halfway
      if self.op.shutdown and instance.admin_up:
        result = self.rpc.call_instance_start(src_node, instance, None)
        msg = result.RemoteFailMsg()
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        if result.failed or not result.data:
          self.LogWarning("Could not export block device %s from node %s to"
                          " node %s", dev.logical_id[1], src_node,
                          dst_node.name)
        # the snapshot is removed even if the export failed
        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
        if msg:
          self.LogWarning("Could not remove snapshot block device %s from node"
                          " %s: %s", dev.logical_id[1], src_node, msg)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          # check the rpc result's failed/data flags (as LURemoveExport
          # does); testing the truthiness of the result object itself
          # would never detect a failure
          result = self.rpc.call_export_remove(node, instance.name)
          if result.failed or not result.data:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)
6228 5c947f38 Iustin Pop
6229 5c947f38 Iustin Pop
6230 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Lock all nodes, but no instance.

    We need all nodes to be locked in order for RemoveExport to work,
    but we don't need to lock the instance itself, as nothing will
    happen to it (and we can remove exports also for a removed
    instance).

    """
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    inst_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was
    # passed in; this will only work if it was an FQDN, though.
    fqdn_warn = not inst_name
    if fqdn_warn:
      inst_name = self.op.instance_name

    exportlist = self.rpc.call_export_list(self.acquired_locks[
      locking.LEVEL_NODE])
    found = False
    for node in exportlist:
      if exportlist[node].failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if inst_name not in exportlist[node].data:
        continue
      found = True
      result = self.rpc.call_export_remove(node, inst_name)
      if result.failed or not result.data:
        logging.error("Could not remove export for instance %s"
                      " on node %s", inst_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
6279 9ac99fda Guido Trotter
6280 9ac99fda Guido Trotter
6281 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    """Expand the target name and declare the lock it needs.

    Node and instance targets are expanded to their full names and the
    corresponding lock is requested; the cluster target needs no lock.

    """
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif self.op.kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded

  def CheckPrereq(self):
    """Check prerequisites.

    Resolves self.target to the cluster, node or instance object the
    tags operation applies to.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
6318 5c947f38 Iustin Pop
6319 5c947f38 Iustin Pop
6320 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq from op.kind/op.name
    return list(self.target.GetTags())
6332 5c947f38 Iustin Pop
6333 5c947f38 Iustin Pop
6334 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
6335 73415719 Iustin Pop
  """Searches the tags for a given pattern.
6336 73415719 Iustin Pop

6337 73415719 Iustin Pop
  """
6338 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
6339 8646adce Guido Trotter
  REQ_BGL = False
6340 8646adce Guido Trotter
6341 8646adce Guido Trotter
  def ExpandNames(self):
6342 8646adce Guido Trotter
    self.needed_locks = {}
6343 73415719 Iustin Pop
6344 73415719 Iustin Pop
  def CheckPrereq(self):
6345 73415719 Iustin Pop
    """Check prerequisites.
6346 73415719 Iustin Pop

6347 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
6348 73415719 Iustin Pop

6349 73415719 Iustin Pop
    """
6350 73415719 Iustin Pop
    try:
6351 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
6352 73415719 Iustin Pop
    except re.error, err:
6353 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6354 73415719 Iustin Pop
                                 (self.op.pattern, err))
6355 73415719 Iustin Pop
6356 73415719 Iustin Pop
  def Exec(self, feedback_fn):
6357 73415719 Iustin Pop
    """Returns the tag list.
6358 73415719 Iustin Pop

6359 73415719 Iustin Pop
    """
6360 73415719 Iustin Pop
    cfg = self.cfg
6361 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
6362 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
6363 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6364 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
6365 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6366 73415719 Iustin Pop
    results = []
6367 73415719 Iustin Pop
    for path, target in tgts:
6368 73415719 Iustin Pop
      for tag in target.GetTags():
6369 73415719 Iustin Pop
        if self.re.search(tag):
6370 73415719 Iustin Pop
          results.append((path, tag))
6371 73415719 Iustin Pop
    return results
6372 73415719 Iustin Pop
6373 73415719 Iustin Pop
6374 f27302fa Iustin Pop
class LUAddTags(TagsLU):
6375 5c947f38 Iustin Pop
  """Sets a tag on a given object.
6376 5c947f38 Iustin Pop

6377 5c947f38 Iustin Pop
  """
6378 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6379 8646adce Guido Trotter
  REQ_BGL = False
6380 5c947f38 Iustin Pop
6381 5c947f38 Iustin Pop
  def CheckPrereq(self):
6382 5c947f38 Iustin Pop
    """Check prerequisites.
6383 5c947f38 Iustin Pop

6384 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
6385 5c947f38 Iustin Pop

6386 5c947f38 Iustin Pop
    """
6387 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6388 f27302fa Iustin Pop
    for tag in self.op.tags:
6389 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6390 5c947f38 Iustin Pop
6391 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6392 5c947f38 Iustin Pop
    """Sets the tag.
6393 5c947f38 Iustin Pop

6394 5c947f38 Iustin Pop
    """
6395 5c947f38 Iustin Pop
    try:
6396 f27302fa Iustin Pop
      for tag in self.op.tags:
6397 f27302fa Iustin Pop
        self.target.AddTag(tag)
6398 5c947f38 Iustin Pop
    except errors.TagError, err:
6399 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6400 5c947f38 Iustin Pop
    try:
6401 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6402 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6403 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6404 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6405 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6406 5c947f38 Iustin Pop
6407 5c947f38 Iustin Pop
6408 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    # every tag to be removed must currently be set on the target
    missing = frozenset(self.op.tags) - self.target.GetTags()
    if missing:
      diff_names = ["'%s'" % tag for tag in missing]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    # persist the modified target object; a concurrent config change
    # aborts the operation
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
6445 06009e27 Iustin Pop
6446 0eed6e61 Guido Trotter
6447 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if not self.op.on_nodes:
      return
    # _GetWantedNodes can be used here, but is not always appropriate to use
    # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
    # more information.
    self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
    self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      rpc_result = self.rpc.call_test_delay(self.op.on_nodes,
                                            self.op.duration)
      if not rpc_result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in rpc_result.items():
        node_result.Raise()
        if not node_result.data:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result.data))
6492 d61df03e Iustin Pop
6493 d61df03e Iustin Pop
6494 d1c2dd75 Iustin Pop
class IAllocator(object):
6495 d1c2dd75 Iustin Pop
  """IAllocator framework.
6496 d61df03e Iustin Pop

6497 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
6498 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
6499 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
6500 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
6501 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
6502 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
6503 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
6504 d1c2dd75 Iustin Pop
      easy usage
6505 d61df03e Iustin Pop

6506 d61df03e Iustin Pop
  """
6507 29859cb7 Iustin Pop
  _ALLO_KEYS = [
6508 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
6509 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
6510 d1c2dd75 Iustin Pop
    ]
6511 29859cb7 Iustin Pop
  _RELO_KEYS = [
6512 29859cb7 Iustin Pop
    "relocate_from",
6513 29859cb7 Iustin Pop
    ]
6514 d1c2dd75 Iustin Pop
6515 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
    """Initialize an IAllocator request.

    @param lu: the LogicalUnit on whose behalf we run; its cfg and rpc
        are used when building the input data
    @param mode: constants.IALLOCATOR_MODE_ALLOC or
        constants.IALLOCATOR_MODE_RELOC; selects which keyword
        arguments are required
    @param name: the instance name this request is about
    @param kwargs: exactly the keys listed in _ALLO_KEYS (alloc mode)
        or _RELO_KEYS (reloc mode); any extra or missing key raises
        errors.ProgrammerError

    """
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # every kwarg must be a known key for this mode...
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    # ...and every key for this mode must have been supplied
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()
6547 d1c2dd75 Iustin Pop
6548 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
6549 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
6550 d1c2dd75 Iustin Pop

6551 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
6552 d1c2dd75 Iustin Pop

6553 d1c2dd75 Iustin Pop
    """
6554 72737a7f Iustin Pop
    cfg = self.lu.cfg
6555 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
6556 d1c2dd75 Iustin Pop
    # cluster data
6557 d1c2dd75 Iustin Pop
    data = {
6558 d1c2dd75 Iustin Pop
      "version": 1,
6559 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
6560 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
6561 1325da74 Iustin Pop
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
6562 d1c2dd75 Iustin Pop
      # we don't have job IDs
6563 d61df03e Iustin Pop
      }
6564 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
6565 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
6566 6286519f Iustin Pop
6567 d1c2dd75 Iustin Pop
    # node data
6568 d1c2dd75 Iustin Pop
    node_results = {}
6569 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
6570 8cc7e742 Guido Trotter
6571 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6572 a0add446 Iustin Pop
      hypervisor_name = self.hypervisor
6573 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6574 a0add446 Iustin Pop
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
6575 8cc7e742 Guido Trotter
6576 72737a7f Iustin Pop
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
6577 a0add446 Iustin Pop
                                           hypervisor_name)
6578 18640d69 Guido Trotter
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
6579 18640d69 Guido Trotter
                       cluster_info.enabled_hypervisors)
6580 1325da74 Iustin Pop
    for nname, nresult in node_data.items():
6581 1325da74 Iustin Pop
      # first fill in static (config-based) values
6582 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
6583 d1c2dd75 Iustin Pop
      pnr = {
6584 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
6585 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
6586 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
6587 fc0fe88c Iustin Pop
        "offline": ninfo.offline,
6588 0b2454b9 Iustin Pop
        "drained": ninfo.drained,
6589 1325da74 Iustin Pop
        "master_candidate": ninfo.master_candidate,
6590 d1c2dd75 Iustin Pop
        }
6591 1325da74 Iustin Pop
6592 1325da74 Iustin Pop
      if not ninfo.offline:
6593 1325da74 Iustin Pop
        nresult.Raise()
6594 1325da74 Iustin Pop
        if not isinstance(nresult.data, dict):
6595 1325da74 Iustin Pop
          raise errors.OpExecError("Can't get data for node %s" % nname)
6596 1325da74 Iustin Pop
        remote_info = nresult.data
6597 1325da74 Iustin Pop
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
6598 1325da74 Iustin Pop
                     'vg_size', 'vg_free', 'cpu_total']:
6599 1325da74 Iustin Pop
          if attr not in remote_info:
6600 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' didn't return attribute"
6601 1325da74 Iustin Pop
                                     " '%s'" % (nname, attr))
6602 1325da74 Iustin Pop
          try:
6603 1325da74 Iustin Pop
            remote_info[attr] = int(remote_info[attr])
6604 1325da74 Iustin Pop
          except ValueError, err:
6605 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' returned invalid value"
6606 1325da74 Iustin Pop
                                     " for '%s': %s" % (nname, attr, err))
6607 1325da74 Iustin Pop
        # compute memory used by primary instances
6608 1325da74 Iustin Pop
        i_p_mem = i_p_up_mem = 0
6609 1325da74 Iustin Pop
        for iinfo, beinfo in i_list:
6610 1325da74 Iustin Pop
          if iinfo.primary_node == nname:
6611 1325da74 Iustin Pop
            i_p_mem += beinfo[constants.BE_MEMORY]
6612 1325da74 Iustin Pop
            if iinfo.name not in node_iinfo[nname].data:
6613 1325da74 Iustin Pop
              i_used_mem = 0
6614 1325da74 Iustin Pop
            else:
6615 1325da74 Iustin Pop
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
6616 1325da74 Iustin Pop
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
6617 1325da74 Iustin Pop
            remote_info['memory_free'] -= max(0, i_mem_diff)
6618 1325da74 Iustin Pop
6619 1325da74 Iustin Pop
            if iinfo.admin_up:
6620 1325da74 Iustin Pop
              i_p_up_mem += beinfo[constants.BE_MEMORY]
6621 1325da74 Iustin Pop
6622 1325da74 Iustin Pop
        # compute memory used by instances
6623 1325da74 Iustin Pop
        pnr_dyn = {
6624 1325da74 Iustin Pop
          "total_memory": remote_info['memory_total'],
6625 1325da74 Iustin Pop
          "reserved_memory": remote_info['memory_dom0'],
6626 1325da74 Iustin Pop
          "free_memory": remote_info['memory_free'],
6627 1325da74 Iustin Pop
          "total_disk": remote_info['vg_size'],
6628 1325da74 Iustin Pop
          "free_disk": remote_info['vg_free'],
6629 1325da74 Iustin Pop
          "total_cpus": remote_info['cpu_total'],
6630 1325da74 Iustin Pop
          "i_pri_memory": i_p_mem,
6631 1325da74 Iustin Pop
          "i_pri_up_memory": i_p_up_mem,
6632 1325da74 Iustin Pop
          }
6633 1325da74 Iustin Pop
        pnr.update(pnr_dyn)
6634 1325da74 Iustin Pop
6635 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
6636 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
6637 d1c2dd75 Iustin Pop
6638 d1c2dd75 Iustin Pop
    # instance data
6639 d1c2dd75 Iustin Pop
    instance_data = {}
6640 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
6641 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
6642 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
6643 d1c2dd75 Iustin Pop
      pir = {
6644 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
6645 1325da74 Iustin Pop
        "admin_up": iinfo.admin_up,
6646 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
6647 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
6648 d1c2dd75 Iustin Pop
        "os": iinfo.os,
6649 1325da74 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
6650 d1c2dd75 Iustin Pop
        "nics": nic_data,
6651 1325da74 Iustin Pop
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
6652 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
6653 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
6654 d1c2dd75 Iustin Pop
        }
6655 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
6656 d61df03e Iustin Pop
6657 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
6658 d61df03e Iustin Pop
6659 d1c2dd75 Iustin Pop
    self.in_data = data
6660 d61df03e Iustin Pop
6661 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
    """Attach an 'allocate' request to the allocator input structure.

    Together with _ComputeClusterData this builds the complete input
    expected by the instance allocator.  The opcode is assumed to have
    been fully validated beforehand.

    """
    # net-mirrored disk templates need a node pair, all others one node
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1

    total_disk = _ComputeDiskSize(self.disk_template, self.disks)

    self.in_data["request"] = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": total_disk,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
6693 298fe380 Iustin Pop
6694 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
    """Attach a 'relocate' request to the allocator input structure.

    Together with _ComputeClusterData this builds the complete input
    expected by the instance allocator.  The opcode is assumed to have
    been fully validated beforehand.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # relocation only makes sense for mirrored templates with exactly
    # one secondary node to move away from
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1
    sizes = [{'size': dsk.size} for dsk in instance.disks]
    space_needed = _ComputeDiskSize(instance.disk_template, sizes)

    self.in_data["request"] = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": space_needed,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
6727 d61df03e Iustin Pop
6728 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
    """Assemble the full allocator input and serialize it.

    Populates self.in_data via the cluster/request helpers and stores
    the serialized text in self.in_text.

    """
    self._ComputeClusterData()

    # pick the request builder matching the allocator mode
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      add_request = self._AddNewInstance
    else:
      add_request = self._AddRelocateInstance
    add_request()

    self.in_text = serializer.Dump(self.in_data)
6740 d61df03e Iustin Pop
6741 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    @type name: string
    @param name: name of the allocator script to run on the master node
    @type validate: boolean
    @param validate: whether to validate the allocator output afterwards
    @type call_fn: callable or None
    @param call_fn: RPC function to use; defaults to the iallocator
        runner call (overridable mainly for testing)
    @raise errors.OpExecError: if the runner RPC fails, returns a
        malformed result, or the allocator itself fails

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner
    # note: the previously assigned-but-unused local 'data' was removed

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    # the runner must return exactly (rcode, stdout, stderr, fail)
    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()
6765 298fe380 Iustin Pop
6766 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
6767 d1c2dd75 Iustin Pop
    """Process the allocator results.
6768 538475ca Iustin Pop

6769 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
6770 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
6771 538475ca Iustin Pop

6772 d1c2dd75 Iustin Pop
    """
6773 d1c2dd75 Iustin Pop
    try:
6774 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
6775 d1c2dd75 Iustin Pop
    except Exception, err:
6776 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
6777 d1c2dd75 Iustin Pop
6778 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
6779 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
6780 538475ca Iustin Pop
6781 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
6782 d1c2dd75 Iustin Pop
      if key not in rdict:
6783 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
6784 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
6785 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
6786 538475ca Iustin Pop
6787 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
6788 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
6789 d1c2dd75 Iustin Pop
                               " is not a list")
6790 d1c2dd75 Iustin Pop
    self.out_data = rdict
6791 538475ca Iustin Pop
6792 538475ca Iustin Pop
6793 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Exercise the instance allocator framework.

  This LU drives the IAllocator machinery in test mode: either only
  building the allocator input (direction 'in') or actually running a
  named allocator script and returning its output (direction 'out').

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and
    mode under test.

    """
    op = self.op
    if op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocation mode needs the full instance specification
      for attr in ("name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"):
        if not hasattr(op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      existing = self.cfg.ExpandInstanceName(op.name)
      if existing is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   existing)
      if not isinstance(op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for nic in op.nics:
        if (not isinstance(nic, dict) or
            "mac" not in nic or
            "ip" not in nic or
            "bridge" not in nic):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for disk in op.disks:
        # each disk must be a dict with an integer size and an r/w mode
        well_formed = (isinstance(disk, dict) and
                       "size" in disk and
                       isinstance(disk.get("size"), int) and
                       disk.get("mode") in ("r", "w"))
        if not well_formed:
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if getattr(op, "hypervisor", None) is None:
        op.hypervisor = self.cfg.GetHypervisorType()
    elif op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   op.name)
      op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 op.mode)

    # an external run additionally needs the allocator's name
    if op.direction == constants.IALLOCATOR_DIR_OUT:
      if getattr(op, "allocator", None) is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    Returns either the serialized allocator input (direction 'in') or
    the raw output of the allocator script (direction 'out').

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      kwargs = {
        "mode": self.op.mode,
        "name": self.op.name,
        "mem_size": self.op.mem_size,
        "disks": self.op.disks,
        "disk_template": self.op.disk_template,
        "os": self.op.os,
        "tags": self.op.tags,
        "nics": self.op.nics,
        "vcpus": self.op.vcpus,
        "hypervisor": self.op.hypervisor,
        }
    else:
      kwargs = {
        "mode": self.op.mode,
        "name": self.op.name,
        "relocate_from": list(self.relocate_from),
        }
    ial = IAllocator(self, **kwargs)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      return ial.in_text
    ial.Run(self.op.allocator, validate=False)
    return ial.out_text