Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 77031881

History | View | Annotate | Download (240.3 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 ffa1c0dc Iustin Pop
import logging
34 74409b12 Iustin Pop
import copy
35 4b7735f9 Iustin Pop
import random
36 a8083063 Iustin Pop
37 a8083063 Iustin Pop
from ganeti import ssh
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 6048c986 Guido Trotter
from ganeti import locking
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 a8083063 Iustin Pop
from ganeti import opcodes
45 8d14b30d Iustin Pop
from ganeti import serializer
46 112f18a5 Iustin Pop
from ganeti import ssconf
47 d61df03e Iustin Pop
48 d61df03e Iustin Pop
49 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  # HPATH/HTYPE identify the hooks directory and object type for this LU;
  # None means no hooks are run (BuildHooksEnv is then never called)
  HPATH = None
  HTYPE = None
  # names of opcode attributes that must be present (non-None); checked
  # in __init__, which raises OpPrereqError on a missing one
  _OP_REQP = []
  # whether this LU must hold the Big Ganeti Lock exclusively
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    @param processor: the mcpu processor executing this LU (also provides
        the LogWarning/LogInfo callbacks bound below)
    @param op: the opcode this LU will execute
    @param context: the execution context; its C{cfg} attribute is the
        cluster configuration
    @param rpc: the RPC runner used to contact remote nodes

    @raise errors.OpPrereqError: if any attribute named in L{_OP_REQP} is
        missing (None) on the opcode

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # per-level flag: 0 = exclusive (default), truthy = shared
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # lazily-created SshRunner, see the ssh property below
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    # validate that all required opcode parameters were supplied
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object, creating it on first access.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  # read-only property so subclasses can simply use self.ssh
  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separate is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    @param feedback_fn: callable used to report progress back to the caller

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    @raise errors.OpPrereqError: if the instance name cannot be expanded

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      # calling this twice (or after manually setting the instance level)
      # would silently overwrite the earlier declaration - treat as a bug
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    # LOCKS_REPLACE discards any previously-declared node locks,
    # LOCKS_APPEND adds to them
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # one-shot flag: consume it so a stale value can't affect a later level
    del self.recalculate_locks[locking.LEVEL_NODE]
327 c4a2fee1 Guido Trotter
328 a8083063 Iustin Pop
329 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  # a None HPATH disables hooks entirely, so BuildHooksEnv is never called
  HPATH = None
  HTYPE = None
338 a8083063 Iustin Pop
339 a8083063 Iustin Pop
340 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: non-empty list of (possibly short) node names
  @rtype: list
  @return: the list of expanded node names, sorted
  @raise errors.OpPrereqError: if the nodes parameter is not a list, or
      if any node name cannot be expanded
  @raise errors.ProgrammerError: if the list is empty

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  # expand each short name via the configuration, bailing out on unknowns
  expanded = []
  for input_name in nodes:
    full_name = lu.cfg.ExpandNodeName(input_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % input_name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
367 3312b702 Iustin Pop
368 3312b702 Iustin Pop
369 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names, or an empty/false value for
      all instances
  @rtype: list
  @return: the expanded instance names (in input order), or all instance
      names sorted when no names were given
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    # no explicit names: return every instance, nicely sorted
    return utils.NiceSort(lu.cfg.GetInstanceList())

  # expand each given (possibly short) name, failing on unknown ones;
  # note: the result deliberately keeps the caller's ordering
  expanded = []
  for short_name in instances:
    full_name = lu.cfg.ExpandInstanceName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such instance name '%s'" % short_name)
    expanded.append(full_name)
  return expanded
397 dcb93971 Michael Hanselmann
398 dcb93971 Michael Hanselmann
399 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @param selected: the fields chosen by the caller
  @raise errors.OpPrereqError: if any selected field matches neither set

  """
  # merge both field sets and look for selections matching neither
  all_fields = utils.FieldSet()
  all_fields.Extend(static)
  all_fields.Extend(dynamic)

  unknown = all_fields.NonMatching(selected)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown))
416 dcb93971 Michael Hanselmann
417 dcb93971 Michael Hanselmann
418 a5961235 Iustin Pop
def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  @param op: the opcode to inspect/update
  @type name: string
  @param name: the attribute name to validate
  @raise errors.OpPrereqError: if the attribute is neither None nor a bool

  """
  val = getattr(op, name, None)
  if val is not None and not isinstance(val, bool):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  # always (re)set the attribute, so later code can rely on its presence
  setattr(op, name, val)
430 a5961235 Iustin Pop
431 a5961235 Iustin Pop
432 a5961235 Iustin Pop
def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  node_info = lu.cfg.GetNodeInfo(node)
  if node_info.offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)
442 a5961235 Iustin Pop
443 a5961235 Iustin Pop
444 733a2b6a Iustin Pop
def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  node_info = lu.cfg.GetNodeInfo(node)
  if node_info.drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node)
454 733a2b6a Iustin Pop
455 733a2b6a Iustin Pop
456 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
457 2c2690c9 Iustin Pop
                          memory, vcpus, nics, disk_template, disks):
458 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
459 e4376078 Iustin Pop

460 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
461 e4376078 Iustin Pop

462 e4376078 Iustin Pop
  @type name: string
463 e4376078 Iustin Pop
  @param name: the name of the instance
464 e4376078 Iustin Pop
  @type primary_node: string
465 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
466 e4376078 Iustin Pop
  @type secondary_nodes: list
467 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
468 e4376078 Iustin Pop
  @type os_type: string
469 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
470 0d68c45d Iustin Pop
  @type status: boolean
471 0d68c45d Iustin Pop
  @param status: the should_run status of the instance
472 e4376078 Iustin Pop
  @type memory: string
473 e4376078 Iustin Pop
  @param memory: the memory size of the instance
474 e4376078 Iustin Pop
  @type vcpus: string
475 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
476 e4376078 Iustin Pop
  @type nics: list
477 e4376078 Iustin Pop
  @param nics: list of tuples (ip, bridge, mac) representing
478 e4376078 Iustin Pop
      the NICs the instance  has
479 2c2690c9 Iustin Pop
  @type disk_template: string
480 2c2690c9 Iustin Pop
  @param disk_template: the distk template of the instance
481 2c2690c9 Iustin Pop
  @type disks: list
482 2c2690c9 Iustin Pop
  @param disks: the list of (size, mode) pairs
483 e4376078 Iustin Pop
  @rtype: dict
484 e4376078 Iustin Pop
  @return: the hook environment for this instance
485 ecb215b5 Michael Hanselmann

486 396e1b78 Michael Hanselmann
  """
487 0d68c45d Iustin Pop
  if status:
488 0d68c45d Iustin Pop
    str_status = "up"
489 0d68c45d Iustin Pop
  else:
490 0d68c45d Iustin Pop
    str_status = "down"
491 396e1b78 Michael Hanselmann
  env = {
492 0e137c28 Iustin Pop
    "OP_TARGET": name,
493 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
494 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
495 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
496 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
497 0d68c45d Iustin Pop
    "INSTANCE_STATUS": str_status,
498 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
499 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
500 2c2690c9 Iustin Pop
    "INSTANCE_DISK_TEMPLATE": disk_template,
501 396e1b78 Michael Hanselmann
  }
502 396e1b78 Michael Hanselmann
503 396e1b78 Michael Hanselmann
  if nics:
504 396e1b78 Michael Hanselmann
    nic_count = len(nics)
505 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
506 396e1b78 Michael Hanselmann
      if ip is None:
507 396e1b78 Michael Hanselmann
        ip = ""
508 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
509 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
510 2c2690c9 Iustin Pop
      env["INSTANCE_NIC%d_MAC" % idx] = mac
511 396e1b78 Michael Hanselmann
  else:
512 396e1b78 Michael Hanselmann
    nic_count = 0
513 396e1b78 Michael Hanselmann
514 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
515 396e1b78 Michael Hanselmann
516 2c2690c9 Iustin Pop
  if disks:
517 2c2690c9 Iustin Pop
    disk_count = len(disks)
518 2c2690c9 Iustin Pop
    for idx, (size, mode) in enumerate(disks):
519 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_SIZE" % idx] = size
520 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_MODE" % idx] = mode
521 2c2690c9 Iustin Pop
  else:
522 2c2690c9 Iustin Pop
    disk_count = 0
523 2c2690c9 Iustin Pop
524 2c2690c9 Iustin Pop
  env["INSTANCE_DISK_COUNT"] = disk_count
525 2c2690c9 Iustin Pop
526 396e1b78 Michael Hanselmann
  return env
527 396e1b78 Michael Hanselmann
528 396e1b78 Michael Hanselmann
529 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  # backend parameters (memory, vcpus) come from the cluster defaults,
  # filled with the instance's own overrides
  be_params = lu.cfg.GetClusterInfo().FillBE(instance)
  nic_info = [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics]
  disk_info = [(disk.size, disk.mode) for disk in instance.disks]
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': be_params[constants.BE_MEMORY],
    'vcpus': be_params[constants.BE_VCPUS],
    'nics': nic_info,
    'disk_template': instance.disk_template,
    'disks': disk_info,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
560 396e1b78 Michael Hanselmann
561 396e1b78 Michael Hanselmann
562 ec0292f1 Iustin Pop
def _AdjustCandidatePool(lu):
563 ec0292f1 Iustin Pop
  """Adjust the candidate pool after node operations.
564 ec0292f1 Iustin Pop

565 ec0292f1 Iustin Pop
  """
566 ec0292f1 Iustin Pop
  mod_list = lu.cfg.MaintainCandidatePool()
567 ec0292f1 Iustin Pop
  if mod_list:
568 ec0292f1 Iustin Pop
    lu.LogInfo("Promoted nodes to master candidate role: %s",
569 ee513a66 Iustin Pop
               ", ".join(node.name for node in mod_list))
570 ec0292f1 Iustin Pop
    for name in mod_list:
571 ec0292f1 Iustin Pop
      lu.context.ReaddNode(name)
572 ec0292f1 Iustin Pop
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
573 ec0292f1 Iustin Pop
  if mc_now > mc_max:
574 ec0292f1 Iustin Pop
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
575 ec0292f1 Iustin Pop
               (mc_now, mc_max))
576 ec0292f1 Iustin Pop
577 ec0292f1 Iustin Pop
578 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # collect the bridge of every NIC and ask the primary node about them
  bridges = [nic.bridge for nic in instance.nics]
  pnode = instance.primary_node
  result = lu.rpc.call_bridges_exist(pnode, bridges)
  result.Raise()
  if not result.data:
    raise errors.OpPrereqError("One or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (bridges, pnode))
590 bf6929a2 Alexander Schreiber
591 bf6929a2 Alexander Schreiber
592 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master_name = self.cfg.GetMasterNode()

    # the only remaining node must be the master itself
    node_names = self.cfg.GetNodeList()
    if len(node_names) != 1 or node_names[0] != master_name:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(node_names) - 1))
    inst_names = self.cfg.GetInstanceList()
    if inst_names:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(inst_names))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master_name = self.cfg.GetMasterNode()
    stop_result = self.rpc.call_node_stop_master(master_name, False)
    stop_result.Raise()
    if not stop_result.data:
      raise errors.OpExecError("Could not disable the master role")
    # keep a backup of the ssh keys before the cluster identity is gone
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)
    return master_name
630 a8083063 Iustin Pop
631 a8083063 Iustin Pop
632 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
633 a8083063 Iustin Pop
  """Verifies the cluster status.
634 a8083063 Iustin Pop

635 a8083063 Iustin Pop
  """
636 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
637 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
638 e54c4c5e Guido Trotter
  _OP_REQP = ["skip_checks"]
639 d4b9d97f Guido Trotter
  REQ_BGL = False
640 d4b9d97f Guido Trotter
641 d4b9d97f Guido Trotter
  def ExpandNames(self):
    # verification needs every node and instance lock, all shared
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
647 a8083063 Iustin Pop
648 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map, vg_name):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G
    if vg_name is not None:
      vglist = node_result.get(constants.NV_VGLIST, None)
      if not vglist:
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                        (node,))
        bad = True
      else:
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
          bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        # use a fresh loop variable: reusing 'node' here would clobber the
        # name of the node being verified for all messages that follow
        for a_node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (a_node,
                           node_result[constants.NV_NODELIST][a_node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        # same shadowing concern as above: don't reuse 'node'
        for a_node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (a_node,
                           node_result[constants.NV_NODENETTEST][a_node]))

    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    if vg_name is not None:
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
      if not isinstance(used_minors, (tuple, list)):
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
                    str(used_minors))
      else:
        for minor, (iname, must_exist) in drbd_map.items():
          if minor not in used_minors and must_exist:
            feedback_fn("  - ERROR: drbd minor %d of instance %s is not"
                        " active" % (minor, iname))
            bad = True
        for minor in used_minors:
          if minor not in drbd_map:
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
                        minor)
            bad = True

    return bad
794 a8083063 Iustin Pop
795 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
796 0a66c968 Iustin Pop
                      node_instance, feedback_fn, n_offline):
797 a8083063 Iustin Pop
    """Verify an instance.
798 a8083063 Iustin Pop

799 a8083063 Iustin Pop
    This function checks to see if the required block devices are
800 a8083063 Iustin Pop
    available on the instance's node.
801 a8083063 Iustin Pop

802 a8083063 Iustin Pop
    """
803 a8083063 Iustin Pop
    bad = False
804 a8083063 Iustin Pop
805 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
806 a8083063 Iustin Pop
807 a8083063 Iustin Pop
    node_vol_should = {}
808 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
809 a8083063 Iustin Pop
810 a8083063 Iustin Pop
    for node in node_vol_should:
811 0a66c968 Iustin Pop
      if node in n_offline:
812 0a66c968 Iustin Pop
        # ignore missing volumes on offline nodes
813 0a66c968 Iustin Pop
        continue
814 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
815 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
816 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
817 a8083063 Iustin Pop
                          (volume, node))
818 a8083063 Iustin Pop
          bad = True
819 a8083063 Iustin Pop
820 0d68c45d Iustin Pop
    if instanceconfig.admin_up:
821 0a66c968 Iustin Pop
      if ((node_current not in node_instance or
822 0a66c968 Iustin Pop
          not instance in node_instance[node_current]) and
823 0a66c968 Iustin Pop
          node_current not in n_offline):
824 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
825 a8083063 Iustin Pop
                        (instance, node_current))
826 a8083063 Iustin Pop
        bad = True
827 a8083063 Iustin Pop
828 a8083063 Iustin Pop
    for node in node_instance:
829 a8083063 Iustin Pop
      if (not node == node_current):
830 a8083063 Iustin Pop
        if instance in node_instance[node]:
831 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
832 a8083063 Iustin Pop
                          (instance, node))
833 a8083063 Iustin Pop
          bad = True
834 a8083063 Iustin Pop
835 6a438c98 Michael Hanselmann
    return bad
836 a8083063 Iustin Pop
837 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
838 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
839 a8083063 Iustin Pop

840 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
841 a8083063 Iustin Pop
    reported as unknown.
842 a8083063 Iustin Pop

843 a8083063 Iustin Pop
    """
844 a8083063 Iustin Pop
    bad = False
845 a8083063 Iustin Pop
846 a8083063 Iustin Pop
    for node in node_vol_is:
847 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
848 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
849 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
850 a8083063 Iustin Pop
                      (volume, node))
851 a8083063 Iustin Pop
          bad = True
852 a8083063 Iustin Pop
    return bad
853 a8083063 Iustin Pop
854 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
855 a8083063 Iustin Pop
    """Verify the list of running instances.
856 a8083063 Iustin Pop

857 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
858 a8083063 Iustin Pop

859 a8083063 Iustin Pop
    """
860 a8083063 Iustin Pop
    bad = False
861 a8083063 Iustin Pop
    for node in node_instance:
862 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
863 a8083063 Iustin Pop
        if runninginstance not in instancelist:
864 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
865 a8083063 Iustin Pop
                          (runninginstance, node))
866 a8083063 Iustin Pop
          bad = True
867 a8083063 Iustin Pop
    return bad
868 a8083063 Iustin Pop
869 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    not_resilient = False

    # For every node, walk the instances for which it is a secondary,
    # grouped by their primary node: should that primary fail, this node
    # must have enough free memory to start all of them.
    # FIXME: not ready for failover to an arbitrary node
    # FIXME: does not support file-backed instances
    # WARNING: we currently take into account down instances as well as up
    # ones, considering that even if they're down someone might want to start
    # them even in the event of a node failure.
    for node, ninfo in node_info.iteritems():
      for prinode, sec_instances in ninfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for iname in sec_instances:
          filled = self.cfg.GetClusterInfo().FillBE(instance_cfg[iname])
          # only auto-balanced instances count towards N+1 requirements
          if filled[constants.BE_AUTO_BALANCE]:
            needed_mem += filled[constants.BE_MEMORY]
        if ninfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          not_resilient = True
    return not_resilient
898 2b3b6ddd Guido Trotter
899 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Turn the list of checks we're going to skip into a frozenset and
    reject any entry that is not a known optional check.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not self.skip_set.issubset(constants.VERIFY_OPTIONAL_CHECKS):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
909 a8083063 Iustin Pop
910 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
911 d8fff41c Guido Trotter
    """Build hooks env.
912 d8fff41c Guido Trotter

913 d8fff41c Guido Trotter
    Cluster-Verify hooks just rone in the post phase and their failure makes
914 d8fff41c Guido Trotter
    the output be logged in the verify output and the verification to fail.
915 d8fff41c Guido Trotter

916 d8fff41c Guido Trotter
    """
917 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
918 d8fff41c Guido Trotter
    # TODO: populate the environment with useful information for verify hooks
919 d8fff41c Guido Trotter
    env = {}
920 d8fff41c Guido Trotter
    return env, [], all_nodes
921 d8fff41c Guido Trotter
922 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
923 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
924 a8083063 Iustin Pop

925 a8083063 Iustin Pop
    """
926 a8083063 Iustin Pop
    bad = False
927 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
928 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
929 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
930 a8083063 Iustin Pop
931 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
932 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
933 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
934 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
935 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
936 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
937 6d2e83d5 Iustin Pop
                        for iname in instancelist)
938 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
939 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
940 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
941 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
942 a8083063 Iustin Pop
    node_volume = {}
943 a8083063 Iustin Pop
    node_instance = {}
944 9c9c7d30 Guido Trotter
    node_info = {}
945 26b6af5e Guido Trotter
    instance_cfg = {}
946 a8083063 Iustin Pop
947 a8083063 Iustin Pop
    # FIXME: verify OS list
948 a8083063 Iustin Pop
    # do local checksums
949 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
950 112f18a5 Iustin Pop
951 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
952 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
953 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
954 112f18a5 Iustin Pop
    file_names.extend(master_files)
955 112f18a5 Iustin Pop
956 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
957 a8083063 Iustin Pop
958 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
959 a8083063 Iustin Pop
    node_verify_param = {
960 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
961 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
962 82e37788 Iustin Pop
                              if not node.offline],
963 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
964 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
965 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
966 82e37788 Iustin Pop
                                 if not node.offline],
967 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
968 25361b9a Iustin Pop
      constants.NV_VERSION: None,
969 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
970 a8083063 Iustin Pop
      }
971 cc9e1230 Guido Trotter
    if vg_name is not None:
972 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
973 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
974 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
975 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
976 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
977 a8083063 Iustin Pop
978 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
979 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
980 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
981 6d2e83d5 Iustin Pop
982 112f18a5 Iustin Pop
    for node_i in nodeinfo:
983 112f18a5 Iustin Pop
      node = node_i.name
984 25361b9a Iustin Pop
      nresult = all_nvinfo[node].data
985 25361b9a Iustin Pop
986 0a66c968 Iustin Pop
      if node_i.offline:
987 0a66c968 Iustin Pop
        feedback_fn("* Skipping offline node %s" % (node,))
988 0a66c968 Iustin Pop
        n_offline.append(node)
989 0a66c968 Iustin Pop
        continue
990 0a66c968 Iustin Pop
991 112f18a5 Iustin Pop
      if node == master_node:
992 25361b9a Iustin Pop
        ntype = "master"
993 112f18a5 Iustin Pop
      elif node_i.master_candidate:
994 25361b9a Iustin Pop
        ntype = "master candidate"
995 22f0f71d Iustin Pop
      elif node_i.drained:
996 22f0f71d Iustin Pop
        ntype = "drained"
997 22f0f71d Iustin Pop
        n_drained.append(node)
998 112f18a5 Iustin Pop
      else:
999 25361b9a Iustin Pop
        ntype = "regular"
1000 112f18a5 Iustin Pop
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1001 25361b9a Iustin Pop
1002 25361b9a Iustin Pop
      if all_nvinfo[node].failed or not isinstance(nresult, dict):
1003 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
1004 25361b9a Iustin Pop
        bad = True
1005 25361b9a Iustin Pop
        continue
1006 25361b9a Iustin Pop
1007 6d2e83d5 Iustin Pop
      node_drbd = {}
1008 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1009 6d2e83d5 Iustin Pop
        instance = instanceinfo[instance]
1010 0d68c45d Iustin Pop
        node_drbd[minor] = (instance.name, instance.admin_up)
1011 112f18a5 Iustin Pop
      result = self._VerifyNode(node_i, file_names, local_checksums,
1012 6d2e83d5 Iustin Pop
                                nresult, feedback_fn, master_files,
1013 cc9e1230 Guido Trotter
                                node_drbd, vg_name)
1014 a8083063 Iustin Pop
      bad = bad or result
1015 a8083063 Iustin Pop
1016 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1017 cc9e1230 Guido Trotter
      if vg_name is None:
1018 cc9e1230 Guido Trotter
        node_volume[node] = {}
1019 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1020 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
1021 26f15862 Iustin Pop
                    (node, utils.SafeEncode(lvdata)))
1022 b63ed789 Iustin Pop
        bad = True
1023 b63ed789 Iustin Pop
        node_volume[node] = {}
1024 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1025 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1026 a8083063 Iustin Pop
        bad = True
1027 a8083063 Iustin Pop
        continue
1028 b63ed789 Iustin Pop
      else:
1029 25361b9a Iustin Pop
        node_volume[node] = lvdata
1030 a8083063 Iustin Pop
1031 a8083063 Iustin Pop
      # node_instance
1032 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1033 25361b9a Iustin Pop
      if not isinstance(idata, list):
1034 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1035 25361b9a Iustin Pop
                    (node,))
1036 a8083063 Iustin Pop
        bad = True
1037 a8083063 Iustin Pop
        continue
1038 a8083063 Iustin Pop
1039 25361b9a Iustin Pop
      node_instance[node] = idata
1040 a8083063 Iustin Pop
1041 9c9c7d30 Guido Trotter
      # node_info
1042 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1043 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
1044 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1045 9c9c7d30 Guido Trotter
        bad = True
1046 9c9c7d30 Guido Trotter
        continue
1047 9c9c7d30 Guido Trotter
1048 9c9c7d30 Guido Trotter
      try:
1049 9c9c7d30 Guido Trotter
        node_info[node] = {
1050 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1051 93e4c50b Guido Trotter
          "pinst": [],
1052 93e4c50b Guido Trotter
          "sinst": [],
1053 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1054 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1055 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1056 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1057 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1058 36e7da50 Guido Trotter
          # secondary.
1059 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1060 9c9c7d30 Guido Trotter
        }
1061 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1062 cc9e1230 Guido Trotter
        if vg_name is not None:
1063 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1064 9c9c7d30 Guido Trotter
      except ValueError:
1065 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
1066 9c9c7d30 Guido Trotter
        bad = True
1067 9c9c7d30 Guido Trotter
        continue
1068 9c9c7d30 Guido Trotter
1069 a8083063 Iustin Pop
    node_vol_should = {}
1070 a8083063 Iustin Pop
1071 a8083063 Iustin Pop
    for instance in instancelist:
1072 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1073 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1074 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1075 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1076 c5705f58 Guido Trotter
      bad = bad or result
1077 832261fd Iustin Pop
      inst_nodes_offline = []
1078 a8083063 Iustin Pop
1079 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1080 a8083063 Iustin Pop
1081 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1082 26b6af5e Guido Trotter
1083 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1084 93e4c50b Guido Trotter
      if pnode in node_info:
1085 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1086 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1087 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1088 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1089 93e4c50b Guido Trotter
        bad = True
1090 93e4c50b Guido Trotter
1091 832261fd Iustin Pop
      if pnode in n_offline:
1092 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1093 832261fd Iustin Pop
1094 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1095 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1096 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1097 93e4c50b Guido Trotter
      # supported either.
1098 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1099 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1100 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1101 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1102 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1103 93e4c50b Guido Trotter
                    % instance)
1104 93e4c50b Guido Trotter
1105 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1106 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1107 3924700f Iustin Pop
1108 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1109 93e4c50b Guido Trotter
        if snode in node_info:
1110 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1111 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1112 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1113 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1114 0a66c968 Iustin Pop
        elif snode not in n_offline:
1115 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1116 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1117 832261fd Iustin Pop
          bad = True
1118 832261fd Iustin Pop
        if snode in n_offline:
1119 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1120 832261fd Iustin Pop
1121 832261fd Iustin Pop
      if inst_nodes_offline:
1122 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1123 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1124 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1125 832261fd Iustin Pop
        bad = True
1126 93e4c50b Guido Trotter
1127 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1128 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1129 a8083063 Iustin Pop
                                       feedback_fn)
1130 a8083063 Iustin Pop
    bad = bad or result
1131 a8083063 Iustin Pop
1132 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1133 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1134 a8083063 Iustin Pop
                                         feedback_fn)
1135 a8083063 Iustin Pop
    bad = bad or result
1136 a8083063 Iustin Pop
1137 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1138 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1139 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1140 e54c4c5e Guido Trotter
      bad = bad or result
1141 2b3b6ddd Guido Trotter
1142 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1143 2b3b6ddd Guido Trotter
    if i_non_redundant:
1144 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1145 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1146 2b3b6ddd Guido Trotter
1147 3924700f Iustin Pop
    if i_non_a_balanced:
1148 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1149 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1150 3924700f Iustin Pop
1151 0a66c968 Iustin Pop
    if n_offline:
1152 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1153 0a66c968 Iustin Pop
1154 22f0f71d Iustin Pop
    if n_drained:
1155 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1156 22f0f71d Iustin Pop
1157 34290825 Michael Hanselmann
    return not bad
1158 a8083063 Iustin Pop
1159 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method checks the hook results, reports failing hook scripts
    to the user via feedback_fn, and computes the new Exec result.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # Only POST phase hooks are examined; for any other phase we fall
    # through without a result (None), as before.
    if phase != constants.HOOKS_PHASE_POST:
      return None

    # regexp used to re-indent the hooks' output for display
    indent_re = re.compile('^', re.M)
    feedback_fn("* Hooks Results")
    if not hooks_results:
      feedback_fn("  - ERROR: general communication failure")
      return 1

    for node_name in hooks_results:
      res = hooks_results[node_name]
      if res.failed or res.data is False or not isinstance(res.data, list):
        if res.offline:
          # offline nodes neither warn nor affect the return value
          continue
        feedback_fn("    Communication failure in hooks execution")
        lu_result = 1
        continue
      # the node header is printed at most once, and only if at least
      # one hook script on this node actually failed
      header_pending = True
      for script, hkr, output in res.data:
        if hkr != constants.HKR_FAIL:
          continue
        if header_pending:
          feedback_fn("  Node %s:" % node_name)
          header_pending = False
        feedback_fn("    ERROR: Script %s failed, output:" % script)
        output = indent_re.sub('      ', output)
        feedback_fn("%s" % output)
        lu_result = 1

    return lu_result
1207 d8fff41c Guido Trotter
1208 a8083063 Iustin Pop
1209 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # shared locks on everything: we only read the configuration and
    # query the nodes, nothing is modified
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @return: a tuple of (unreachable nodes, per-node LVM enumeration
        errors, names of instances with broken (offline) LVs, and a
        per-instance map of missing (node, volume) pairs)

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # map of (node, volume) -> owning instance, for all LVs belonging
    # to running, network-mirrored instances
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]
      if lvs.failed:
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
        # a string payload is an error message, not volume data; without
        # this continue the iteritems() call below would raise
        # AttributeError on the string
        continue
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1292 2c95a8d4 Iustin Pop
1293 2c95a8d4 Iustin Pop
1294 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    mn = self.cfg.GetMasterNode()
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostinfo = utils.HostInfo(self.op.name)

    new_name = hostinfo.name
    self.ip = new_ip = hostinfo.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    # a changed IP must not already be in use on the network
    if new_ip != old_ip and utils.TcpPing(new_ip,
                                          constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                 " reachable on the network. Aborting." %
                                 new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    new_name = self.op.name
    new_ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = new_name
      cluster.master_ip = new_ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      if master in node_list:
        node_list.remove(master)
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)

    finally:
      # whatever happened above, try to restore the master role
      result = self.rpc.call_node_start_master(master, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")
1372 07bd8a51 Iustin Pop
1373 07bd8a51 Iustin Pop
1374 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or any of its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  # recurse into the children first; any LVM descendant makes the
  # whole disk count as lvm-based
  for child in (disk.children or []):
    if _RecursiveCheckIfLVMBased(child):
      return True
  return disk.dev_type == constants.LD_LV
1388 8084f9f6 Manuel Franceschini
1389 8084f9f6 Manuel Franceschini
1390 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1391 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1392 8084f9f6 Manuel Franceschini

1393 8084f9f6 Manuel Franceschini
  """
1394 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1395 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1396 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1397 c53279cf Guido Trotter
  REQ_BGL = False
1398 c53279cf Guido Trotter
1399 4b7735f9 Iustin Pop
  def CheckParameters(self):
1400 4b7735f9 Iustin Pop
    """Check parameters
1401 4b7735f9 Iustin Pop

1402 4b7735f9 Iustin Pop
    """
1403 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
1404 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
1405 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1406 4b7735f9 Iustin Pop
      try:
1407 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1408 4b7735f9 Iustin Pop
      except ValueError, err:
1409 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1410 4b7735f9 Iustin Pop
                                   str(err))
1411 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
1412 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed")
1413 4b7735f9 Iustin Pop
1414 c53279cf Guido Trotter
  def ExpandNames(self):
1415 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
1416 c53279cf Guido Trotter
    # all nodes to be modified.
1417 c53279cf Guido Trotter
    self.needed_locks = {
1418 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1419 c53279cf Guido Trotter
    }
1420 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1421 8084f9f6 Manuel Franceschini
1422 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1423 8084f9f6 Manuel Franceschini
    """Build hooks env.
1424 8084f9f6 Manuel Franceschini

1425 8084f9f6 Manuel Franceschini
    """
1426 8084f9f6 Manuel Franceschini
    env = {
1427 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1428 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1429 8084f9f6 Manuel Franceschini
      }
1430 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1431 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1432 8084f9f6 Manuel Franceschini
1433 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1434 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1435 8084f9f6 Manuel Franceschini

1436 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1437 5f83e263 Iustin Pop
    if the given volume group is valid.
1438 8084f9f6 Manuel Franceschini

1439 8084f9f6 Manuel Franceschini
    """
1440 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
1441 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1442 8084f9f6 Manuel Franceschini
      for inst in instances:
1443 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1444 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1445 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1446 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1447 8084f9f6 Manuel Franceschini
1448 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1449 779c15bb Iustin Pop
1450 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1451 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1452 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
1453 8084f9f6 Manuel Franceschini
      for node in node_list:
1454 781de953 Iustin Pop
        if vglist[node].failed:
1455 781de953 Iustin Pop
          # ignoring down node
1456 781de953 Iustin Pop
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
1457 781de953 Iustin Pop
          continue
1458 781de953 Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
1459 781de953 Iustin Pop
                                              self.op.vg_name,
1460 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1461 8084f9f6 Manuel Franceschini
        if vgstatus:
1462 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1463 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1464 8084f9f6 Manuel Franceschini
1465 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
1466 d4b72030 Guido Trotter
    # validate beparams changes
1467 779c15bb Iustin Pop
    if self.op.beparams:
1468 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
1469 779c15bb Iustin Pop
      self.new_beparams = cluster.FillDict(
1470 779c15bb Iustin Pop
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
1471 779c15bb Iustin Pop
1472 779c15bb Iustin Pop
    # hypervisor list/parameters
1473 779c15bb Iustin Pop
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
1474 779c15bb Iustin Pop
    if self.op.hvparams:
1475 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
1476 779c15bb Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1477 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
1478 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
1479 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
1480 779c15bb Iustin Pop
        else:
1481 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
1482 779c15bb Iustin Pop
1483 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1484 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
1485 779c15bb Iustin Pop
    else:
1486 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
1487 779c15bb Iustin Pop
1488 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1489 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
1490 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
1491 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1492 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
1493 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
1494 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
1495 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
1496 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1497 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
1498 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
1499 779c15bb Iustin Pop
1500 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1501 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1502 8084f9f6 Manuel Franceschini

1503 8084f9f6 Manuel Franceschini
    """
1504 779c15bb Iustin Pop
    if self.op.vg_name is not None:
1505 779c15bb Iustin Pop
      if self.op.vg_name != self.cfg.GetVGName():
1506 779c15bb Iustin Pop
        self.cfg.SetVGName(self.op.vg_name)
1507 779c15bb Iustin Pop
      else:
1508 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
1509 779c15bb Iustin Pop
                    " state, not changing")
1510 779c15bb Iustin Pop
    if self.op.hvparams:
1511 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
1512 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1513 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1514 779c15bb Iustin Pop
    if self.op.beparams:
1515 779c15bb Iustin Pop
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
1516 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1517 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1518 4b7735f9 Iustin Pop
1519 779c15bb Iustin Pop
    self.cfg.Update(self.cluster)
1520 8084f9f6 Manuel Franceschini
1521 4b7735f9 Iustin Pop
    # we want to update nodes after the cluster so that if any errors
1522 4b7735f9 Iustin Pop
    # happen, we have recorded and saved the cluster info
1523 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1524 ec0292f1 Iustin Pop
      _AdjustCandidatePool(self)
1525 4b7735f9 Iustin Pop
1526 8084f9f6 Manuel Franceschini
1527 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # shared node locks are enough: the configuration is only pushed out
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    # re-saving the current cluster info triggers the distribution of
    # the configuration to all nodes
    self.cfg.Update(self.cfg.GetClusterInfo())
1552 afee0879 Iustin Pop
1553 afee0879 Iustin Pop
1554 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  rpc_failures = 0
  while True:
    max_time = 0
    all_done = True
    degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      lu.LogWarning("Can't get any data from node %s", node)
      rpc_failures += 1
      # give up after ten consecutive RPC failures
      if rpc_failures >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rpc_failures = 0
    for idx, mstat in enumerate(rstats.data):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[idx].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      degraded = degraded or (is_degraded and perc_done is None)
      if perc_done is None:
        # this device is fully synced
        continue
      all_done = False
      if est_time is None:
        rem_time = "no time estimate"
      else:
        rem_time = "%d estimated seconds remaining" % est_time
        max_time = est_time
      lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                      (instance.disks[idx].iv_name, perc_done, rem_time))
    if all_done or oneshot:
      break

    time.sleep(min(60, max_time))

  if all_done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not degraded
1610 a8083063 Iustin Pop
1611 a8083063 Iustin Pop
1612 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  @param lu: the LU on whose behalf we check (used for config access,
      RPC calls and warning logging)
  @param dev: the disk object to check; its children, if any, are
      checked recursively
  @param node: the name of the node on which to check the device
  @param on_primary: whether the device is assembled on the primary
      node; if False, the device is only queried when it can be
      assembled on a secondary
  @param ldisk: if True, check the local disk status instead of the
      overall degradation status
  @rtype: boolean
  @return: True if the device (and all its children) is consistent

  """
  lu.cfg.SetDiskID(dev, node)
  # index into the status tuple returned by blockdev_find:
  # position 6 is the ldisk status, position 5 the is_degraded flag
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.RemoteFailMsg()
    if msg:
      # RPC-level failure: warn and treat the disk as inconsistent
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      # the call succeeded but the device was not found on the node
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      result = result and (not rstats.payload[idx])
  if dev.children:
    for child in dev.children:
      # NOTE(review): ldisk is not propagated to the children check, so
      # children are always tested on is_degraded - confirm this is intended
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result
1643 a8083063 Iustin Pop
1644 a8083063 Iustin Pop
1645 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  # no purely-static fields; all three output fields are computed
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      # querying a subset of OS names is not implemented
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # nothing to check; ExpandNames already validated the request

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      # skip nodes whose RPC call failed or returned no data
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    # restrict the query to online nodes among those we locked
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
                   if node in node_list]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    # pol: {os_name: {node_name: [os objects]}}, see _DiagnoseByOS
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # the OS is valid only if every node has at least one valid entry
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1731 a8083063 Iustin Pop
1732 a8083063 Iustin Pop
1733 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    # the target node is excluded from the hook node lists (see above)
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # FIX: use call syntax instead of the deprecated
      # "raise Class, args" statement form, for consistency with the
      # other raises in this module
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    # store the canonical node name/object for Exec
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)
1800 eb1742d5 Guido Trotter
1801 a8083063 Iustin Pop
1802 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields that require a live RPC call to the nodes
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  # fields answerable from the configuration alone
  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    "drained",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed when dynamic fields are requested AND the
    # caller asked for it
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted


  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      # with locking, the set of locked nodes is authoritative
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      # without locking, a requested node may have disappeared meanwhile
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          # RPC failure: no live data for this node
          live_data[name] = {}
    else:
      # NOTE: all keys share the same (empty) dict object; this is safe
      # only because the values are never mutated, just read via .get()
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    # the instance->node maps are only computed when an instance-related
    # field was actually requested (they require a full instance scan)
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif field == "drained":
          val = node.drained
        elif self._FIELDS_DYNAMIC.Matches(field):
          # dynamic fields come from the live RPC data (None if missing)
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1958 a8083063 Iustin Pop
1959 a8083063 Iustin Pop
1960 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      # an empty node list means "query all nodes"
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    # build the instance list and the per-instance, per-node LV map,
    # used to resolve the "instance" output field
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    # output fields which are plain lookups into the volume dict
    vol_keys = {"phys": 'dev', "vg": 'vg', "name": 'name'}

    output = []
    for node in nodenames:
      if node not in volumes:
        continue
      nresult = volumes[node]
      # skip nodes whose RPC call failed or returned nothing
      if nresult.failed or not nresult.data:
        continue

      for vol in sorted(nresult.data, key=lambda v: v['dev']):
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field in vol_keys:
            val = vol[vol_keys[field]]
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this volume on this node, if any
            val = '-'
            for inst in ilist:
              node_lvs = lv_by_node[inst].get(node)
              if node_lvs and vol['name'] in node_lvs:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
2039 dcb93971 Michael Hanselmann
2040 dcb93971 Michael Hanselmann
2041 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
2042 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
2043 a8083063 Iustin Pop

2044 a8083063 Iustin Pop
  """
2045 a8083063 Iustin Pop
  HPATH = "node-add"
2046 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2047 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2048 a8083063 Iustin Pop
2049 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    new_name = self.op.node_name
    env = {
      "OP_TARGET": new_name,
      "NODE_NAME": new_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current members only; post-hooks also run on
    # the node being added
    pre_nodes = self.cfg.GetNodeList()
    post_nodes = pre_nodes + [new_name]
    return env, pre_nodes, post_nodes
2064 a8083063 Iustin Pop
2065 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the node name; also canonicalizes it and gives us the IP
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # single-homed node: secondary IP defaults to the primary one
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    # membership check depends on whether this is an add or a readd
    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # on readd, the node must keep its previous IP configuration
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      # neither of the new node's IPs may collide with any existing node
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # the new node becomes a master candidate if the pool is not full yet
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    mc_now, _ = self.cfg.GetMasterCandidateStats()
    master_candidate = mc_now < cp_size

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip,
                                 master_candidate=master_candidate,
                                 offline=False, drained=False)
2147 a8083063 Iustin Pop
2148 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    Verifies the new node's daemon version, pushes the cluster ssh keys
    to it, updates /etc/hosts and known_hosts everywhere, copies
    hypervisor-specific files, and finally registers the node in the
    cluster context.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity and that the node daemon speaks our protocol
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot transfer ssh keys to the"
                               " new node: %s" % msg)

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        for failed in result[verifier].data['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    if constants.HTS_COPY_VNC_PASSWORD.intersection(enabled_hypervisors):
      to_copy.append(constants.VNC_PASSWORD_FILE)

    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      # FIX: check the RPC payload (.data), as in the distribution loop
      # above; the original tested "not result[node]", i.e. the truthiness
      # of the RpcResult object itself, which is always true
      if result[node].failed or not result[node].data:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
2254 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  Supported modifications are the master_candidate, offline and drained
  flags; at most one of them may be set to True in a single invocation.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      raise errors.OpPrereqError("Please pass at least one modification")
    if all_mods.count(True) > 1:
      # offline/drained/master_candidate are mutually-exclusive "on" states
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if ((self.op.master_candidate == False or self.op.offline == True or
         self.op.drained == True) and node.master_candidate):
      # we will demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate, online and not drained")
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      # FIX: the original message contained a %s placeholder that was
      # never filled in; apply the node name
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name)

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    Returns a list of (parameter, new value) pairs describing the
    changes that were applied.

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        # an offline node cannot remain master candidate or drained
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        # ask the node itself to clean up its master-candidate state;
        # a failure here is only a warning, the config change proceeds
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        # a drained node cannot remain master candidate or offline
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result
2381 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # reading the cluster configuration needs no locks at all
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    # expose hypervisor parameters only for the enabled hypervisors
    hv_params = {}
    for hv_name in cluster.enabled_hypervisors:
      hv_params[hv_name] = cluster.hvparams[hv_name]
    arch = (platform.architecture()[0], platform.machine())
    return {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": arch,
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": hv_params,
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      }
2422 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    self.needed_locks = {}

    # validate the requested fields against the static/dynamic sets
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    # dispatch table: field name -> zero-argument value producer
    getters = {
      "cluster_name": self.cfg.GetClusterName,
      "master_node": self.cfg.GetMasterNode,
      "drain_flag": lambda: os.path.exists(constants.JOB_QUEUE_DRAIN_FILE),
      }
    values = []
    for field in self.op.output_fields:
      if field not in getters:
        raise errors.ParameterError(field)
      values.append(getters[field]())
    return values
2461 a8083063 Iustin Pop
2462 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    assembled, disk_info = _AssembleInstanceDisks(self, self.instance)
    if not assembled:
      raise errors.OpExecError("Cannot activate block devices")

    return disk_info
2500 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: a (disks_ok, device_info) tuple; disks_ok is False if any
      non-ignored assembly failed, and device_info is a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
    # NOTE: 'result' is intentionally the value left over from the inner
    # loop, i.e. the primary-node assemble RPC result for this disk; its
    # payload is the node-visible device name
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        result.payload))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2568 a8083063 Iustin Pop
2569 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Assemble an instance's disks, aborting the operation on failure.

  On any assembly error the already-activated disks are shut down
  again and an OpExecError is raised.

  """
  ok, _ = _AssembleInstanceDisks(lu, instance, ignore_secondaries=force)
  if ok:
    return
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.proc.LogWarning("", hint="If the message above refers to a"
                       " secondary node,"
                       " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2584 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    _SafeShutdownInstanceDisks(self, self.instance)
2618 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shut down an instance's block devices, refusing if it still runs.

  The primary node is first queried for its running instances; only
  when the instance is confirmed stopped is _ShutdownInstanceDisks
  invoked.

  """
  pnode = instance.primary_node
  running = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  if running.failed or not isinstance(running.data, list):
    raise errors.OpExecError("Can't contact node '%s'" % pnode)

  if instance.name in running.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2638 a8083063 Iustin Pop
2639 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, failures on the primary node do not
  affect the boolean return value (they are still logged).

  """
  success = True
  pnode = instance.primary_node
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(pnode):
      lu.cfg.SetDiskID(top_disk, node)
      shutdown_result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = shutdown_result.RemoteFailMsg()
      if not msg:
        continue
      lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                    disk.iv_name, node, msg)
      if node != pnode or not ignore_primary:
        success = False
  return success
2662 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Verify that a node can provide a given amount of free memory.

  The node is queried over RPC for its free memory; when the value
  cannot be obtained, or is smaller than the requested amount, an
  OpPrereqError is raised.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  vg_name = lu.cfg.GetVGName()
  node_info = lu.rpc.call_node_info([node], vg_name, hypervisor_name)[node]
  node_info.Raise()
  free_mem = node_info.data.get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                             " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                             " needed %s MiB, available %s MiB" %
                             (node, reason, requested, free_mem))
2696 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build the hook environment.

    The hooks run on the master and on all nodes of the instance.

    """
    env = {"FORCE": self.op.force}
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    node_list = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, its primary node
    is online and it has enough free memory to be started.

    """
    inst = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.instance = inst
    assert inst is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, inst.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(inst)
    # the instance's bridges must be present on the primary node
    _CheckInstanceBridgesExist(self, inst)

    _CheckNodeFreeMemory(self, inst.primary_node,
                         "starting instance %s" % inst.name,
                         bep[constants.BE_MEMORY], inst.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    inst = self.instance

    # record the new administrative state before contacting the node
    self.cfg.MarkInstanceUp(inst.name)

    _StartInstanceDisks(self, inst, self.op.force)

    result = self.rpc.call_instance_start(inst.primary_node, inst)
    msg = result.RemoteFailMsg()
    if msg:
      # do not leave the disks activated if the start failed
      _ShutdownInstanceDisks(self, inst)
      raise errors.OpExecError("Could not start instance: %s" % msg)
2759 a8083063 Iustin Pop
2760 a8083063 Iustin Pop
2761 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    valid_types = (constants.INSTANCE_REBOOT_SOFT,
                   constants.INSTANCE_REBOOT_HARD,
                   constants.INSTANCE_REBOOT_FULL)
    if self.op.reboot_type not in valid_types:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  valid_types)
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build the hook environment.

    The hooks run on the master and on all nodes of the instance.

    """
    env = {"IGNORE_SECONDARIES": self.op.ignore_secondaries,
           "REBOOT_TYPE": self.op.reboot_type}
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    node_list = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that its
    primary node is online and has the needed bridges.

    """
    inst = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.instance = inst
    assert inst is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, inst.primary_node)

    # the bridges must exist for a (re)start to succeed
    _CheckInstanceBridgesExist(self, inst)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    inst = self.instance
    reboot_type = self.op.reboot_type
    pnode = inst.primary_node

    if reboot_type in (constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD):
      # soft/hard reboot: delegate the whole operation to the node
      for disk in inst.disks:
        self.cfg.SetDiskID(disk, pnode)
      result = self.rpc.call_instance_reboot(pnode, inst, reboot_type)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not reboot instance: %s" % msg)
    else:
      # full reboot: stop the instance (and its disks), then start it again
      msg = self.rpc.call_instance_shutdown(pnode, inst).RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance for"
                                 " full reboot: %s" % msg)
      _ShutdownInstanceDisks(self, inst)
      _StartInstanceDisks(self, inst, self.op.ignore_secondaries)
      msg = self.rpc.call_instance_start(pnode, inst).RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, inst)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(inst.name)
2844 bf6929a2 Alexander Schreiber
2845 bf6929a2 Alexander Schreiber
2846 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build the hook environment.

    The hooks run on the master and on all nodes of the instance.

    """
    node_list = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (_BuildInstanceHookEnvByObject(self, self.instance),
            node_list, node_list)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that its
    primary node is online.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    inst = self.instance
    # record the new administrative state before contacting the node
    self.cfg.MarkInstanceDown(inst.name)
    result = self.rpc.call_instance_shutdown(inst.primary_node, inst)
    msg = result.RemoteFailMsg()
    if msg:
      # a failed shutdown is only a warning; the disks are torn down anyway
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)

    _ShutdownInstanceDisks(self, inst)
2892 a8083063 Iustin Pop
2893 a8083063 Iustin Pop
2894 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  Optionally (when the opcode carries an "os_type" attribute) the
  instance's OS is changed before the OS create scripts are re-run.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and (when an OS change is requested) that the new OS is valid on
    the primary node.

    @raise errors.OpPrereqError: if the instance is diskless, marked up,
        actually running on its node, or the requested OS is invalid

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node itself that the instance is not running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info.failed or remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    # "os_type" is optional on this opcode, hence the getattr
    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: the original interpolated self.op.pnode, an attribute this
        # opcode doesn't have, which would raise AttributeError instead of
        # the intended error message
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    # the disks are only activated for the duration of the OS scripts
    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      _ShutdownInstanceDisks(self, inst)
2979 fe7b0351 Michael Hanselmann
2980 fe7b0351 Michael Hanselmann
2981 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name is resolvable and not yet taken.

    @raise errors.OpPrereqError: if the instance is unknown, marked up,
        running on its node, or the new name/IP is already in use

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node itself that the instance is not running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    # FIX: the docstring above wrongly said "Reinstall the instance."
    # (copy-paste from LUReinstallInstance); this LU renames.
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      # file-based instances store their disks under a directory named
      # after the instance, so that directory must be renamed too
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    # run the OS-level rename script; failures here are only warnings
    # since the configuration rename already happened
    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
3090 decd5f45 Iustin Pop
3091 decd5f45 Iustin Pop
3092 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build the hook environment.

    The hooks run on the master node only.

    """
    hook_env = _BuildInstanceHookEnvByObject(self, self.instance)
    node_list = [self.cfg.GetMasterNode()]
    return hook_env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    inst = self.instance
    logging.info("Shutting down instance %s on node %s",
                 inst.name, inst.primary_node)

    result = self.rpc.call_instance_shutdown(inst.primary_node, inst)
    msg = result.RemoteFailMsg()
    if msg:
      # with ignore_failures a failed shutdown only produces a warning
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
      feedback_fn("Warning: can't shutdown instance: %s" % msg)

    logging.info("Removing block devices for instance %s", inst.name)

    if not _RemoveDisks(self, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", inst.name)

    self.cfg.RemoveInstance(inst.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = inst.name
3160 a8083063 Iustin Pop
3161 a8083063 Iustin Pop
3162 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  For each selected instance a row with the requested output fields is
  built; static fields are answered from the configuration alone, while
  the dynamic ones (oper_state, oper_ram, status) require an rpc query
  to the instances' primary nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields computable from the configuration; the regexp-style entries
  # match per-index items such as "disk.size/0" or "nic.mac/1"
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  # fields that need live data from the nodes
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")

  def ExpandNames(self):
    # validate the requested fields against the known field sets
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # a query only needs shared locks
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # node queries are needed only when a non-static field was asked
    # for, and locking only when additionally requested by the caller
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # the node locks are derived from the (already locked) instances
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # nothing to check; field validation happened in ExpandNames
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    @return: a list of rows, one per instance, each row being the list
        of values for the requested output fields (in order)

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      # ask every involved primary node about its running instances
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
            # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          # None means "unknown" (primary node unreachable)
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin/operational status string
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # legacy names for the first two disks
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
3398 a8083063 Iustin Pop
3399 a8083063 Iustin Pop
3400 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  The instance is shut down on its primary node and restarted on its
  (network-mirrored) secondary node.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance; its node locks are computed in DeclareLocks
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    # the instance was locked in ExpandNames, so it must still exist
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # degraded disks only block the failover for instances that are
        # up, and only when the user didn't ask to ignore consistency
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      if self.op.ignore_consistency:
        # best-effort: the user asked to proceed even if the source
        # node can't be reached/cleaned up
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # switch the primary node in the configuration before restarting
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance)
      msg = result.RemoteFailMsg()
      if msg:
        # clean up the partially-activated disks before aborting
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
3532 a8083063 Iustin Pop
3533 a8083063 Iustin Pop
3534 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
3535 53c776b5 Iustin Pop
  """Migrate an instance.
3536 53c776b5 Iustin Pop

3537 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
3538 53c776b5 Iustin Pop
  which is done with shutdown.
3539 53c776b5 Iustin Pop

3540 53c776b5 Iustin Pop
  """
3541 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
3542 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3543 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
3544 53c776b5 Iustin Pop
3545 53c776b5 Iustin Pop
  REQ_BGL = False
3546 53c776b5 Iustin Pop
3547 53c776b5 Iustin Pop
  def ExpandNames(self):
    """Expand and lock the instance name.

    The node locks cannot be computed yet (the instance's nodes are
    only known once the instance lock is held), so they are declared
    empty here and filled in later by DeclareLocks.

    """
    self._ExpandAndLockInstance()
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    self.needed_locks[locking.LEVEL_NODE] = []
3551 53c776b5 Iustin Pop
3552 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
    """Acquire the node locks of the (already locked) instance.

    """
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()
3555 53c776b5 Iustin Pop
3556 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    # expose the migration parameters to the hooks
    env.update({
      "MIGRATE_LIVE": self.op.live,
      "MIGRATE_CLEANUP": self.op.cleanup,
      })
    node_list = ([self.cfg.GetMasterNode()] +
                 list(self.instance.secondary_nodes))
    return env, node_list, node_list
3567 53c776b5 Iustin Pop
3568 53c776b5 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses drbd8, that
    its secondary node can host it (memory, bridges) and - unless we
    are only cleaning up - that the hypervisor agrees it is migratable.

    """
    # The instance name was already expanded (and the instance locked)
    # by _ExpandAndLockInstance in ExpandNames, so a direct config
    # lookup suffices; re-expanding the name here was redundant.  This
    # also matches the pattern used by the other locked instance LUs
    # (e.g. LUFailoverInstance).
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    if result.failed or not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    if not self.op.cleanup:
      # a real migration also needs a non-drained target node and the
      # hypervisor's agreement that the instance can be migrated
      _CheckNodeNotDrained(self, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance
3615 53c776b5 Iustin Pop
3616 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
    """Poll the nodes until the DRBD disks are fully synchronized.

    This uses our own step-based rpc call; while waiting it reports the
    lowest sync percentage seen across all nodes.

    """
    self.feedback_fn("* wait until resync is done")
    synced = False
    while not synced:
      rpc_result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                                self.nodes_ip,
                                                self.instance.disks)
      synced = True
      lowest_percent = 100
      for node, node_result in rpc_result.items():
        fail_msg = node_result.RemoteFailMsg()
        if fail_msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, fail_msg))
        node_done, node_percent = node_result.payload
        if not node_done:
          synced = False
        if node_percent is not None:
          lowest_percent = min(lowest_percent, node_percent)
      if not synced:
        if lowest_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % lowest_percent)
        time.sleep(2)
3643 53c776b5 Iustin Pop
3644 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
    """Demote the instance's disks on the given node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    # point the disk IDs at the node we are about to talk to
    for disk in self.instance.disks:
      self.cfg.SetDiskID(disk, node)

    close_result = self.rpc.call_blockdev_close(node, self.instance.name,
                                                self.instance.disks)
    fail_msg = close_result.RemoteFailMsg()
    if fail_msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, fail_msg))
3659 53c776b5 Iustin Pop
3660 53c776b5 Iustin Pop
  def _GoStandalone(self):
    """Disconnect the instance's disks from the network on all nodes.

    """
    self.feedback_fn("* changing into standalone mode")
    rpc_result = self.rpc.call_drbd_disconnect_net(self.all_nodes,
                                                   self.nodes_ip,
                                                   self.instance.disks)
    for node, node_result in rpc_result.items():
      fail_msg = node_result.RemoteFailMsg()
      if fail_msg:
        raise errors.OpExecError("Cannot disconnect disks node %s,"
                                 " error %s" % (node, fail_msg))
3672 53c776b5 Iustin Pop
3673 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
    """Reconnect the instance's disks to the network on all nodes.

    @param multimaster: whether to reconnect in dual-master mode

    """
    if multimaster:
      mode_name = "dual-master"
    else:
      mode_name = "single-master"
    self.feedback_fn("* changing disks into %s mode" % mode_name)
    rpc_result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks,
                                               self.instance.name, multimaster)
    for node, node_result in rpc_result.items():
      fail_msg = node_result.RemoteFailMsg()
      if fail_msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node, fail_msg))
3690 53c776b5 Iustin Pop
3691 53c776b5 Iustin Pop
  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    @raise errors.OpExecError: if the instance is found running on
        both nodes, on neither node, or a disk step fails

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      # a non-list answer means the node-level RPC itself is broken
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    # whichever node lost the instance is demoted back to DRBD secondary
    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore here errors, since if the device is standalone, it
      # won't be able to sync
      pass
    # cycle through standalone into single-master mode and resync
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")
3757 53c776b5 Iustin Pop
3758 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    Demotes the target node back to secondary, then cycles the disks
    through standalone mode into single-master mode and waits for
    resync.  Any failure is only logged as a warning (best-effort):
    at this point manual operator recovery may be required anyway,
    and raising here would mask the original migration failure.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))
3773 6906a9d8 Guido Trotter
3774 6906a9d8 Guido Trotter
  def _AbortMigration(self):
3775 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
3776 6906a9d8 Guido Trotter

3777 6906a9d8 Guido Trotter
    """
3778 6906a9d8 Guido Trotter
    instance = self.instance
3779 6906a9d8 Guido Trotter
    target_node = self.target_node
3780 6906a9d8 Guido Trotter
    migration_info = self.migration_info
3781 6906a9d8 Guido Trotter
3782 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
3783 6906a9d8 Guido Trotter
                                                    instance,
3784 6906a9d8 Guido Trotter
                                                    migration_info,
3785 6906a9d8 Guido Trotter
                                                    False)
3786 6906a9d8 Guido Trotter
    abort_msg = abort_result.RemoteFailMsg()
3787 6906a9d8 Guido Trotter
    if abort_msg:
3788 6906a9d8 Guido Trotter
      logging.error("Aborting migration failed on target node %s: %s" %
3789 6906a9d8 Guido Trotter
                    (target_node, abort_msg))
3790 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we stil have to try to revert the
3791 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
3792 6906a9d8 Guido Trotter
3793 53c776b5 Iustin Pop
  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    @raise errors.OpExecError: if any step fails; failures during
        pre-migration or migration also trigger an abort plus a
        disk-status revert before raising

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # refuse to migrate onto degraded/unsynced disks
    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    # NOTE(review): purpose of the fixed 10s sleeps around the migrate
    # call is not visible here - presumably settling time; confirm
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    # demote the old primary and go back to single-master mode
    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")
3885 53c776b5 Iustin Pop
3886 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
    """Perform the migration.

    Computes the node topology used by all the helper methods (source
    and target node, their list, and their secondary IPs), then
    dispatches either to the cleanup procedure or to the actual
    migration, depending on the opcode.

    """
    self.feedback_fn = feedback_fn

    source = self.instance.primary_node
    target = self.instance.secondary_nodes[0]
    self.source_node = source
    self.target_node = target
    self.all_nodes = [source, target]
    self.nodes_ip = {}
    for node_name in (source, target):
      self.nodes_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip

    if self.op.cleanup:
      return self._ExecCleanup()
    return self._ExecMigration()
3903 53c776b5 Iustin Pop
3904 53c776b5 Iustin Pop
3905 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
3906 428958aa Iustin Pop
                    info, force_open):
3907 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
3908 a8083063 Iustin Pop

3909 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
3910 a8083063 Iustin Pop
  all its children.
3911 a8083063 Iustin Pop

3912 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
3913 a8083063 Iustin Pop

3914 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
3915 428958aa Iustin Pop
  @param node: the node on which to create the device
3916 428958aa Iustin Pop
  @type instance: L{objects.Instance}
3917 428958aa Iustin Pop
  @param instance: the instance which owns the device
3918 428958aa Iustin Pop
  @type device: L{objects.Disk}
3919 428958aa Iustin Pop
  @param device: the device to create
3920 428958aa Iustin Pop
  @type force_create: boolean
3921 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
3922 428958aa Iustin Pop
      will be change to True whenever we find a device which has
3923 428958aa Iustin Pop
      CreateOnSecondary() attribute
3924 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
3925 428958aa Iustin Pop
      (this will be represented as a LVM tag)
3926 428958aa Iustin Pop
  @type force_open: boolean
3927 428958aa Iustin Pop
  @param force_open: this parameter will be passes to the
3928 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
3929 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
3930 428958aa Iustin Pop
      the child assembly and the device own Open() execution
3931 428958aa Iustin Pop

3932 a8083063 Iustin Pop
  """
3933 a8083063 Iustin Pop
  if device.CreateOnSecondary():
3934 428958aa Iustin Pop
    force_create = True
3935 796cab27 Iustin Pop
3936 a8083063 Iustin Pop
  if device.children:
3937 a8083063 Iustin Pop
    for child in device.children:
3938 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
3939 428958aa Iustin Pop
                      info, force_open)
3940 a8083063 Iustin Pop
3941 428958aa Iustin Pop
  if not force_create:
3942 796cab27 Iustin Pop
    return
3943 796cab27 Iustin Pop
3944 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
3945 de12473a Iustin Pop
3946 de12473a Iustin Pop
3947 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
3948 de12473a Iustin Pop
  """Create a single block device on a given node.
3949 de12473a Iustin Pop

3950 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
3951 de12473a Iustin Pop
  created in advance.
3952 de12473a Iustin Pop

3953 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
3954 de12473a Iustin Pop
  @param node: the node on which to create the device
3955 de12473a Iustin Pop
  @type instance: L{objects.Instance}
3956 de12473a Iustin Pop
  @param instance: the instance which owns the device
3957 de12473a Iustin Pop
  @type device: L{objects.Disk}
3958 de12473a Iustin Pop
  @param device: the device to create
3959 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
3960 de12473a Iustin Pop
      (this will be represented as a LVM tag)
3961 de12473a Iustin Pop
  @type force_open: boolean
3962 de12473a Iustin Pop
  @param force_open: this parameter will be passes to the
3963 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
3964 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
3965 de12473a Iustin Pop
      the child assembly and the device own Open() execution
3966 de12473a Iustin Pop

3967 de12473a Iustin Pop
  """
3968 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3969 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
3970 428958aa Iustin Pop
                                       instance.name, force_open, info)
3971 7d81697f Iustin Pop
  msg = result.RemoteFailMsg()
3972 7d81697f Iustin Pop
  if msg:
3973 428958aa Iustin Pop
    raise errors.OpExecError("Can't create block device %s on"
3974 7d81697f Iustin Pop
                             " node %s for instance %s: %s" %
3975 7d81697f Iustin Pop
                             (device, node, instance.name, msg))
3976 a8083063 Iustin Pop
  if device.physical_id is None:
3977 0959c824 Iustin Pop
    device.physical_id = result.payload
3978 a8083063 Iustin Pop
3979 a8083063 Iustin Pop
3980 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
3981 923b1523 Iustin Pop
  """Generate a suitable LV name.
3982 923b1523 Iustin Pop

3983 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
3984 923b1523 Iustin Pop

3985 923b1523 Iustin Pop
  """
3986 923b1523 Iustin Pop
  results = []
3987 923b1523 Iustin Pop
  for val in exts:
3988 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
3989 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
3990 923b1523 Iustin Pop
  return results
3991 923b1523 Iustin Pop
3992 923b1523 Iustin Pop
3993 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  The children are the backing data LV and a fixed-size (128 MB)
  metadata LV, both on the cluster's volume group; the drbd8 device
  itself carries the network coordinates (nodes, port, minors) and a
  freshly generated shared secret.

  """
  # allocate the cluster-wide resources needed by the new device
  port = lu.cfg.AllocatePort()
  vg_name = lu.cfg.GetVGName()
  secret = lu.cfg.GenerateDRBDSecret()
  # backing storage: one LV for the data, one for drbd metadata
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vg_name, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vg_name, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port,
                                  p_minor, s_minor, secret),
                      children=[data_lv, meta_lv],
                      iv_name=iv_name)
4012 a1f445d3 Iustin Pop
4013 7c0d6283 Michael Hanselmann
4014 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  @param lu: the lu on whose behalf we execute
  @param template_name: one of the constants.DT_* disk templates
  @param instance_name: the instance name, used for DRBD minor
      allocation
  @param primary_node: the instance's primary node
  @param secondary_nodes: must be empty for plain/file templates,
      and contain exactly one node for drbd8
  @param disk_info: list of dicts with "size" and "mode" keys, one
      per disk
  @param file_storage_dir: base directory for file-based disks
  @param file_driver: the driver part of the logical id for
      file-based disks
  @param base_index: offset added to each disk's index when building
      its "disk/N" iv_name
  @return: list of L{objects.Disk} objects (empty for diskless)
  @raise errors.ProgrammerError: if the secondary-nodes list does not
      match the template, or the template is unknown

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # two minors per disk: one on the primary, one on the secondary
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    # each disk needs a pair of LVs: <prefix>_data and <prefix>_meta
    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
4078 a8083063 Iustin Pop
4079 a8083063 Iustin Pop
4080 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
4081 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
4082 3ecf6786 Iustin Pop

4083 3ecf6786 Iustin Pop
  """
4084 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
4085 a0c3fea1 Michael Hanselmann
4086 a0c3fea1 Michael Hanselmann
4087 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.  For file-based
  instances the file storage directory is created first; afterwards
  every disk tree is created on all of the instance's nodes, forcing
  creation/opening only on the primary node.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @raise errors.OpExecError: in case any creation step fails

  """
  disk_info_text = _GetInstanceInfoText(instance)
  primary = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    dir_result = lu.rpc.call_file_storage_dir_create(primary, storage_dir)

    if dir_result.failed or not dir_result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % primary)

    if not dir_result.data[0]:
      raise errors.OpExecError("Failed to create directory '%s'" %
                               storage_dir)

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for disk in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 disk.iv_name, instance.name)
    #HARDCODE
    for node in instance.all_nodes:
      on_primary = (node == primary)
      _CreateBlockDev(lu, node, instance, disk, on_primary,
                      disk_info_text, on_primary)
4123 a8083063 Iustin Pop
4124 a8083063 Iustin Pop
4125 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  success = True
  for device in instance.disks:
    # remove every physical component of the disk, each on its node
    for node_name, subdev in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(subdev, node_name)
      fail_msg = lu.rpc.call_blockdev_remove(node_name,
                                             subdev).RemoteFailMsg()
      if fail_msg:
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s",
                      device.iv_name, node_name, fail_msg)
        success = False

  if instance.disk_template == constants.DT_FILE:
    # file-based instances also need their storage directory removed
    storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    dir_result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                     storage_dir)
    if dir_result.failed or not dir_result.data:
      logging.error("Could not remove directory '%s'", storage_dir)
      success = False

  return success
4162 a8083063 Iustin Pop
4163 a8083063 Iustin Pop
4164 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  Templates not backed by the volume group (diskless, file) map to
  None; plain is the sum of the disk sizes, and drbd8 additionally
  accounts for its per-disk metadata.

  @raise errors.ProgrammerError: for an unknown disk template

  """
  plain_size = sum(d["size"] for d in disks)
  # 128 MB are added for drbd metadata for each disk
  drbd_size = sum(d["size"] + 128 for d in disks)
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: plain_size,
    constants.DT_DRBD8: drbd_size,
    constants.DT_FILE: None,
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]
4182 e2fe6369 Iustin Pop
4183 e2fe6369 Iustin Pop
4184 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Validate hypervisor parameters on a set of nodes.

  Shared helper for instance creation and instance modification: each
  node is asked to validate the given parameter set, and the first
  reported problem aborts the operation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname,
                                                  hvparams)
  for node in nodenames:
    node_result = hvinfo[node]
    # answers from offline nodes are not meaningful, skip them
    if node_result.offline:
      continue
    failure = node_result.RemoteFailMsg()
    if failure:
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
                                 " %s" % failure)
4212 74409b12 Iustin Pop
4213 74409b12 Iustin Pop
4214 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
4215 a8083063 Iustin Pop
  """Create an instance.
4216 a8083063 Iustin Pop

4217 a8083063 Iustin Pop
  """
4218 a8083063 Iustin Pop
  HPATH = "instance-add"
4219 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4220 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
4221 08db7c5c Iustin Pop
              "mode", "start",
4222 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
4223 338e51e8 Iustin Pop
              "hvparams", "beparams"]
4224 7baf741d Guido Trotter
  REQ_BGL = False
4225 7baf741d Guido Trotter
4226 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Expand a (possibly abbreviated) node name and verify it exists.

    @raise errors.OpPrereqError: if the name matches no configured node

    """
    expanded = self.cfg.ExpandNodeName(node)
    if expanded is not None:
      return expanded
    raise errors.OpPrereqError("Unknown node %s" % node)
4234 7baf741d Guido Trotter
4235 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    Performs all checks that can be done without cluster-wide state:
    opcode constants, hypervisor/backend parameter syntax, instance
    name resolution, NIC/disk specification pre-building, and the
    node/iallocator lock computation.

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # default to the cluster-wide hypervisor if none was requested
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    # syntax checking is done on the cluster defaults overlaid with the
    # opcode-supplied values, i.e. the effective parameter set
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)

    # fill and remember the beparams dict
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification (also resolves the name to its FQDN)
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup: turn the opcode's dicts into objects.NIC instances
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks; "none"/None means no IP, "auto" means the
      # IP the instance name resolved to
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification; "auto"/"generate" are resolved later
      # (see the MAC generation step in CheckPrereq)
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
      # bridge verification
      bridge = nic.get("bridge", None)
      if bridge is None:
        bridge = self.cfg.GetDefBridge()
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build: normalize each disk spec to {size, mode}
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    # NOTE(review): file_driver/file_storage_dir are read directly, not
    # via getattr like the attributes above -- presumably the opcode
    # always defines them; verify against the opcode definition
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    # only relative storage dirs are accepted (rooted at the cluster
    # file storage dir)
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    # exactly one of iallocator/pnode must be given
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator may pick any node, so lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      # default the export path to the instance name
      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        # no source node: the export must be searched for on all nodes
        # (done in CheckPrereq), so all node locks are needed
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        # relative paths are rooted at the cluster export directory
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
4391 a8083063 Iustin Pop
4392 538475ca Iustin Pop
  def _RunAllocator(self):
    """Select the instance's nodes via the configured iallocator.

    On success, fills in self.op.pnode from the allocator's answer,
    and self.op.snode as well when two nodes are required.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    allocator = IAllocator(self,
                           mode=constants.IALLOCATOR_MODE_ALLOC,
                           name=self.op.instance_name,
                           disk_template=self.op.disk_template,
                           tags=[],
                           os=self.op.os_type,
                           vcpus=self.be_full[constants.BE_VCPUS],
                           mem_size=self.be_full[constants.BE_MEMORY],
                           disks=self.disks,
                           nics=[n.ToDict() for n in self.nics],
                           hypervisor=self.op.hypervisor,
                           )

    allocator.Run(self.op.iallocator)

    if not allocator.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           allocator.info))
    if len(allocator.nodes) != allocator.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(allocator.nodes),
                                  allocator.required_nodes))
    # first returned node is the primary, optional second the mirror
    self.op.pnode = allocator.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(allocator.nodes))
    if allocator.required_nodes == 2:
      self.op.snode = allocator.nodes[1]
4427 538475ca Iustin Pop
4428 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"ADD_MODE": self.op.mode}
    if self.op.mode == constants.INSTANCE_IMPORT:
      # expose the import source to the hooks as well
      env.update({
        "SRC_NODE": self.op.src_node,
        "SRC_PATH": self.op.src_path,
        "SRC_IMAGES": self.src_images,
        })

    nic_specs = [(nic.ip, nic.bridge, nic.mac) for nic in self.nics]
    disk_specs = [(disk["size"], disk["mode"]) for disk in self.disks]
    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=nic_specs,
      disk_template=self.op.disk_template,
      disks=disk_specs,
    ))

    # hooks run on the master plus all nodes of the new instance
    node_list = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return env, node_list, node_list
4458 a8083063 Iustin Pop
4459 a8083063 Iustin Pop
4460 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Verifies everything that needs cluster state: export data for
    imports, IP conflicts, node status (online/drained), free disk
    space, hypervisor parameters, OS availability, bridges and free
    memory on the primary node. Also runs the iallocator (if any) and
    generates final MAC addresses for auto/generate NICs.

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_node is None:
        # relative import path: search all locked nodes for the export
        exp_list = self.rpc.call_export_list(
          self.acquired_locks[locking.LEVEL_NODE])
        found = False
        for node in exp_list:
          if not exp_list[node].failed and src_path in exp_list[node].data:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                      src_path)

      _CheckNodeOnline(self, src_node)
      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise()
      if not result.data:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      export_info = result.data
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      # build the per-disk dump image list; False marks a disk with no
      # dump in the export
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          # exported NICs are numbered 0..exp_nic_count-1, so the
          # 'nic%d_mac' option only exists for idx < exp_nic_count; the
          # previous '>= idx' check was off by one and could query a
          # missing option (uncaught ConfigParser.NoOptionError)
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx + 1:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements (req_size is None for non-LVM templates)
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise()
        info = info.data
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise()
    if not isinstance(result.data, objects.OS):
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
                                 " exist on destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)
4639 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4640 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
4641 a8083063 Iustin Pop

4642 a8083063 Iustin Pop
    """
4643 a8083063 Iustin Pop
    instance = self.op.instance_name
4644 a8083063 Iustin Pop
    pnode_name = self.pnode.name
4645 a8083063 Iustin Pop
4646 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
4647 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
4648 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
4649 2a6469d5 Alexander Schreiber
    else:
4650 2a6469d5 Alexander Schreiber
      network_port = None
4651 58acb49d Alexander Schreiber
4652 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
4653 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
4654 31a853d2 Iustin Pop
4655 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
4656 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
4657 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
4658 2c313123 Manuel Franceschini
    else:
4659 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
4660 2c313123 Manuel Franceschini
4661 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
4662 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
4663 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
4664 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
4665 0f1a06e3 Manuel Franceschini
4666 0f1a06e3 Manuel Franceschini
4667 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
4668 a8083063 Iustin Pop
                                  self.op.disk_template,
4669 a8083063 Iustin Pop
                                  instance, pnode_name,
4670 08db7c5c Iustin Pop
                                  self.secondaries,
4671 08db7c5c Iustin Pop
                                  self.disks,
4672 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
4673 e2a65344 Iustin Pop
                                  self.op.file_driver,
4674 e2a65344 Iustin Pop
                                  0)
4675 a8083063 Iustin Pop
4676 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
4677 a8083063 Iustin Pop
                            primary_node=pnode_name,
4678 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
4679 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
4680 4978db17 Iustin Pop
                            admin_up=False,
4681 58acb49d Alexander Schreiber
                            network_port=network_port,
4682 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
4683 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
4684 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
4685 a8083063 Iustin Pop
                            )
4686 a8083063 Iustin Pop
4687 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
4688 796cab27 Iustin Pop
    try:
4689 796cab27 Iustin Pop
      _CreateDisks(self, iobj)
4690 796cab27 Iustin Pop
    except errors.OpExecError:
4691 796cab27 Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
4692 796cab27 Iustin Pop
      try:
4693 796cab27 Iustin Pop
        _RemoveDisks(self, iobj)
4694 796cab27 Iustin Pop
      finally:
4695 796cab27 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance)
4696 796cab27 Iustin Pop
        raise
4697 a8083063 Iustin Pop
4698 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
4699 a8083063 Iustin Pop
4700 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
4701 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
4702 7baf741d Guido Trotter
    # added the instance to the config
4703 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
4704 e36e96b4 Guido Trotter
    # Unlock all the nodes
4705 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
4706 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
4707 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
4708 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
4709 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
4710 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
4711 9c8971d7 Guido Trotter
    else:
4712 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
4713 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
4714 a8083063 Iustin Pop
4715 a8083063 Iustin Pop
    if self.op.wait_for_sync:
4716 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
4717 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
4718 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
4719 a8083063 Iustin Pop
      time.sleep(15)
4720 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
4721 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
4722 a8083063 Iustin Pop
    else:
4723 a8083063 Iustin Pop
      disk_abort = False
4724 a8083063 Iustin Pop
4725 a8083063 Iustin Pop
    if disk_abort:
4726 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
4727 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
4728 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
4729 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
4730 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
4731 3ecf6786 Iustin Pop
                               " this instance")
4732 a8083063 Iustin Pop
4733 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
4734 a8083063 Iustin Pop
                (instance, pnode_name))
4735 a8083063 Iustin Pop
4736 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
4737 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
4738 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
4739 781de953 Iustin Pop
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
4740 20e01edd Iustin Pop
        msg = result.RemoteFailMsg()
4741 20e01edd Iustin Pop
        if msg:
4742 781de953 Iustin Pop
          raise errors.OpExecError("Could not add os for instance %s"
4743 20e01edd Iustin Pop
                                   " on node %s: %s" %
4744 20e01edd Iustin Pop
                                   (instance, pnode_name, msg))
4745 a8083063 Iustin Pop
4746 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
4747 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
4748 a8083063 Iustin Pop
        src_node = self.op.src_node
4749 09acf207 Guido Trotter
        src_images = self.src_images
4750 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
4751 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
4752 09acf207 Guido Trotter
                                                         src_node, src_images,
4753 6c0af70e Guido Trotter
                                                         cluster_name)
4754 781de953 Iustin Pop
        import_result.Raise()
4755 781de953 Iustin Pop
        for idx, result in enumerate(import_result.data):
4756 09acf207 Guido Trotter
          if not result:
4757 726d7d68 Iustin Pop
            self.LogWarning("Could not import the image %s for instance"
4758 726d7d68 Iustin Pop
                            " %s, disk %d, on node %s" %
4759 726d7d68 Iustin Pop
                            (src_images[idx], instance, idx, pnode_name))
4760 a8083063 Iustin Pop
      else:
4761 a8083063 Iustin Pop
        # also checked in the prereq part
4762 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
4763 3ecf6786 Iustin Pop
                                     % self.op.mode)
4764 a8083063 Iustin Pop
4765 a8083063 Iustin Pop
    if self.op.start:
4766 4978db17 Iustin Pop
      iobj.admin_up = True
4767 4978db17 Iustin Pop
      self.cfg.Update(iobj)
4768 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
4769 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
4770 07813a9e Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj)
4771 dd279568 Iustin Pop
      msg = result.RemoteFailMsg()
4772 dd279568 Iustin Pop
      if msg:
4773 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance: %s" % msg)
4774 a8083063 Iustin Pop
4775 a8083063 Iustin Pop
4776 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
4777 a8083063 Iustin Pop
  """Connect to an instance's console.
4778 a8083063 Iustin Pop

4779 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
4780 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
4781 a8083063 Iustin Pop
  console.
4782 a8083063 Iustin Pop

4783 a8083063 Iustin Pop
  """
4784 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4785 8659b73e Guido Trotter
  REQ_BGL = False
4786 8659b73e Guido Trotter
4787 8659b73e Guido Trotter
  def ExpandNames(self):
4788 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
4789 a8083063 Iustin Pop
4790 a8083063 Iustin Pop
  def CheckPrereq(self):
4791 a8083063 Iustin Pop
    """Check prerequisites.
4792 a8083063 Iustin Pop

4793 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4794 a8083063 Iustin Pop

4795 a8083063 Iustin Pop
    """
4796 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4797 8659b73e Guido Trotter
    assert self.instance is not None, \
4798 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4799 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
4800 a8083063 Iustin Pop
4801 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4802 a8083063 Iustin Pop
    """Connect to the console of an instance
4803 a8083063 Iustin Pop

4804 a8083063 Iustin Pop
    """
4805 a8083063 Iustin Pop
    instance = self.instance
4806 a8083063 Iustin Pop
    node = instance.primary_node
4807 a8083063 Iustin Pop
4808 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
4809 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
4810 781de953 Iustin Pop
    node_insts.Raise()
4811 a8083063 Iustin Pop
4812 781de953 Iustin Pop
    if instance.name not in node_insts.data:
4813 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
4814 a8083063 Iustin Pop
4815 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
4816 a8083063 Iustin Pop
4817 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
4818 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
4819 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
4820 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
4821 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
4822 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
4823 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
4824 b047857b Michael Hanselmann
4825 82122173 Iustin Pop
    # build ssh cmdline
4826 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
4827 a8083063 Iustin Pop
4828 a8083063 Iustin Pop
4829 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
4830 a8083063 Iustin Pop
  """Replace the disks of an instance.
4831 a8083063 Iustin Pop

4832 a8083063 Iustin Pop
  """
4833 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
4834 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4835 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
4836 efd990e4 Guido Trotter
  REQ_BGL = False
4837 efd990e4 Guido Trotter
4838 7e9366f7 Iustin Pop
  def CheckArguments(self):
4839 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
4840 efd990e4 Guido Trotter
      self.op.remote_node = None
4841 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
4842 7e9366f7 Iustin Pop
      self.op.iallocator = None
4843 7e9366f7 Iustin Pop
4844 7e9366f7 Iustin Pop
    # check for valid parameter combination
4845 7e9366f7 Iustin Pop
    cnt = [self.op.remote_node, self.op.iallocator].count(None)
4846 7e9366f7 Iustin Pop
    if self.op.mode == constants.REPLACE_DISK_CHG:
4847 7e9366f7 Iustin Pop
      if cnt == 2:
4848 7e9366f7 Iustin Pop
        raise errors.OpPrereqError("When changing the secondary either an"
4849 7e9366f7 Iustin Pop
                                   " iallocator script must be used or the"
4850 7e9366f7 Iustin Pop
                                   " new node given")
4851 7e9366f7 Iustin Pop
      elif cnt == 0:
4852 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Give either the iallocator or the new"
4853 efd990e4 Guido Trotter
                                   " secondary, not both")
4854 7e9366f7 Iustin Pop
    else: # not replacing the secondary
4855 7e9366f7 Iustin Pop
      if cnt != 2:
4856 7e9366f7 Iustin Pop
        raise errors.OpPrereqError("The iallocator and new node options can"
4857 7e9366f7 Iustin Pop
                                   " be used only when changing the"
4858 7e9366f7 Iustin Pop
                                   " secondary node")
4859 7e9366f7 Iustin Pop
4860 7e9366f7 Iustin Pop
  def ExpandNames(self):
4861 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
4862 7e9366f7 Iustin Pop
4863 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
4864 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4865 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
4866 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
4867 efd990e4 Guido Trotter
      if remote_node is None:
4868 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
4869 efd990e4 Guido Trotter
                                   self.op.remote_node)
4870 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
4871 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
4872 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
4873 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
4874 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
4875 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
4876 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4877 efd990e4 Guido Trotter
    else:
4878 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
4879 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4880 efd990e4 Guido Trotter
4881 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
4882 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
4883 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
4884 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
4885 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
4886 efd990e4 Guido Trotter
      self._LockInstancesNodes()
4887 a8083063 Iustin Pop
4888 b6e82a65 Iustin Pop
  def _RunAllocator(self):
4889 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
4890 b6e82a65 Iustin Pop

4891 b6e82a65 Iustin Pop
    """
4892 72737a7f Iustin Pop
    ial = IAllocator(self,
4893 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
4894 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
4895 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
4896 b6e82a65 Iustin Pop
4897 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
4898 b6e82a65 Iustin Pop
4899 b6e82a65 Iustin Pop
    if not ial.success:
4900 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
4901 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
4902 b6e82a65 Iustin Pop
                                                           ial.info))
4903 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
4904 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4905 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
4906 b6e82a65 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
4907 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
4908 86d9d3bb Iustin Pop
    self.LogInfo("Selected new secondary for the instance: %s",
4909 86d9d3bb Iustin Pop
                 self.op.remote_node)
4910 b6e82a65 Iustin Pop
4911 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4912 a8083063 Iustin Pop
    """Build hooks env.
4913 a8083063 Iustin Pop

4914 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
4915 a8083063 Iustin Pop

4916 a8083063 Iustin Pop
    """
4917 a8083063 Iustin Pop
    env = {
4918 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
4919 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
4920 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
4921 a8083063 Iustin Pop
      }
4922 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4923 0834c866 Iustin Pop
    nl = [
4924 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
4925 0834c866 Iustin Pop
      self.instance.primary_node,
4926 0834c866 Iustin Pop
      ]
4927 0834c866 Iustin Pop
    if self.op.remote_node is not None:
4928 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
4929 a8083063 Iustin Pop
    return env, nl, nl
4930 a8083063 Iustin Pop
4931 a8083063 Iustin Pop
  def CheckPrereq(self):
4932 a8083063 Iustin Pop
    """Check prerequisites.
4933 a8083063 Iustin Pop

4934 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4935 a8083063 Iustin Pop

4936 a8083063 Iustin Pop
    """
4937 efd990e4 Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4938 efd990e4 Guido Trotter
    assert instance is not None, \
4939 efd990e4 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4940 a8083063 Iustin Pop
    self.instance = instance
4941 a8083063 Iustin Pop
4942 7e9366f7 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
4943 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
4944 7e9366f7 Iustin Pop
                                 " instances")
4945 a8083063 Iustin Pop
4946 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
4947 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
4948 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
4949 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
4950 a8083063 Iustin Pop
4951 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
4952 a9e0c397 Iustin Pop
4953 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
4954 de8c7666 Guido Trotter
      self._RunAllocator()
4955 b6e82a65 Iustin Pop
4956 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
4957 a9e0c397 Iustin Pop
    if remote_node is not None:
4958 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
4959 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
4960 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
4961 a9e0c397 Iustin Pop
    else:
4962 a9e0c397 Iustin Pop
      self.remote_node_info = None
4963 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
4964 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
4965 3ecf6786 Iustin Pop
                                 " the instance.")
4966 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
4967 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
4968 7e9366f7 Iustin Pop
                                 " secondary node of the instance.")
4969 7e9366f7 Iustin Pop
4970 7e9366f7 Iustin Pop
    if self.op.mode == constants.REPLACE_DISK_PRI:
4971 7e9366f7 Iustin Pop
      n1 = self.tgt_node = instance.primary_node
4972 7e9366f7 Iustin Pop
      n2 = self.oth_node = self.sec_node
4973 7e9366f7 Iustin Pop
    elif self.op.mode == constants.REPLACE_DISK_SEC:
4974 7e9366f7 Iustin Pop
      n1 = self.tgt_node = self.sec_node
4975 7e9366f7 Iustin Pop
      n2 = self.oth_node = instance.primary_node
4976 7e9366f7 Iustin Pop
    elif self.op.mode == constants.REPLACE_DISK_CHG:
4977 7e9366f7 Iustin Pop
      n1 = self.new_node = remote_node
4978 7e9366f7 Iustin Pop
      n2 = self.oth_node = instance.primary_node
4979 7e9366f7 Iustin Pop
      self.tgt_node = self.sec_node
4980 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, remote_node)
4981 7e9366f7 Iustin Pop
    else:
4982 7e9366f7 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replace mode")
4983 7e9366f7 Iustin Pop
4984 7e9366f7 Iustin Pop
    _CheckNodeOnline(self, n1)
4985 7e9366f7 Iustin Pop
    _CheckNodeOnline(self, n2)
4986 a9e0c397 Iustin Pop
4987 54155f52 Iustin Pop
    if not self.op.disks:
4988 54155f52 Iustin Pop
      self.op.disks = range(len(instance.disks))
4989 54155f52 Iustin Pop
4990 54155f52 Iustin Pop
    for disk_idx in self.op.disks:
4991 3e0cea06 Iustin Pop
      instance.FindDisk(disk_idx)
4992 a8083063 Iustin Pop
4993 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
4994 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
4995 a9e0c397 Iustin Pop

4996 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4997 e4376078 Iustin Pop

4998 e4376078 Iustin Pop
      1. for each disk to be replaced:
4999 e4376078 Iustin Pop

5000 e4376078 Iustin Pop
        1. create new LVs on the target node with unique names
5001 e4376078 Iustin Pop
        1. detach old LVs from the drbd device
5002 e4376078 Iustin Pop
        1. rename old LVs to name_replaced.<time_t>
5003 e4376078 Iustin Pop
        1. rename new LVs to old LVs
5004 e4376078 Iustin Pop
        1. attach the new LVs (with the old names now) to the drbd device
5005 e4376078 Iustin Pop

5006 e4376078 Iustin Pop
      1. wait for sync across all devices
5007 e4376078 Iustin Pop

5008 e4376078 Iustin Pop
      1. for each modified disk:
5009 e4376078 Iustin Pop

5010 e4376078 Iustin Pop
        1. remove old LVs (which have the name name_replaces.<time_t>)
5011 a9e0c397 Iustin Pop

5012 a9e0c397 Iustin Pop
    Failures are not very well handled.
5013 cff90b79 Iustin Pop

5014 a9e0c397 Iustin Pop
    """
5015 cff90b79 Iustin Pop
    steps_total = 6
5016 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5017 a9e0c397 Iustin Pop
    instance = self.instance
5018 a9e0c397 Iustin Pop
    iv_names = {}
5019 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
5020 a9e0c397 Iustin Pop
    # start of work
5021 a9e0c397 Iustin Pop
    cfg = self.cfg
5022 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
5023 cff90b79 Iustin Pop
    oth_node = self.oth_node
5024 cff90b79 Iustin Pop
5025 cff90b79 Iustin Pop
    # Step: check device activation
5026 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
5027 cff90b79 Iustin Pop
    info("checking volume groups")
5028 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
5029 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([oth_node, tgt_node])
5030 cff90b79 Iustin Pop
    if not results:
5031 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
5032 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
5033 781de953 Iustin Pop
      res = results[node]
5034 781de953 Iustin Pop
      if res.failed or not res.data or my_vg not in res.data:
5035 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5036 cff90b79 Iustin Pop
                                 (my_vg, node))
5037 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5038 54155f52 Iustin Pop
      if idx not in self.op.disks:
5039 cff90b79 Iustin Pop
        continue
5040 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
5041 54155f52 Iustin Pop
        info("checking disk/%d on %s" % (idx, node))
5042 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
5043 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
5044 23829f6f Iustin Pop
        msg = result.RemoteFailMsg()
5045 23829f6f Iustin Pop
        if not msg and not result.payload:
5046 23829f6f Iustin Pop
          msg = "disk not found"
5047 23829f6f Iustin Pop
        if msg:
5048 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5049 23829f6f Iustin Pop
                                   (idx, node, msg))
5050 cff90b79 Iustin Pop
5051 cff90b79 Iustin Pop
    # Step: check other node consistency
5052 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
5053 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5054 54155f52 Iustin Pop
      if idx not in self.op.disks:
5055 cff90b79 Iustin Pop
        continue
5056 54155f52 Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, oth_node))
5057 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, oth_node,
5058 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
5059 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
5060 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
5061 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
5062 cff90b79 Iustin Pop
5063 cff90b79 Iustin Pop
    # Step: create new storage
5064 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
5065 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5066 54155f52 Iustin Pop
      if idx not in self.op.disks:
5067 a9e0c397 Iustin Pop
        continue
5068 a9e0c397 Iustin Pop
      size = dev.size
5069 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
5070 54155f52 Iustin Pop
      lv_names = [".disk%d_%s" % (idx, suf)
5071 54155f52 Iustin Pop
                  for suf in ["data", "meta"]]
5072 b9bddb6b Iustin Pop
      names = _GenerateUniqueNames(self, lv_names)
5073 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5074 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
5075 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5076 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
5077 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
5078 a9e0c397 Iustin Pop
      old_lvs = dev.children
5079 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
5080 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
5081 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
5082 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
5083 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
5084 428958aa Iustin Pop
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
5085 428958aa Iustin Pop
                        _GetInstanceInfoText(instance), False)
5086 a9e0c397 Iustin Pop
5087 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
5088 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
5089 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
5090 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
5091 781de953 Iustin Pop
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
5092 781de953 Iustin Pop
      result.Raise()
5093 781de953 Iustin Pop
      if not result.data:
5094 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
5095 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
5096 cff90b79 Iustin Pop
      #dev.children = []
5097 cff90b79 Iustin Pop
      #cfg.Update(instance)
5098 a9e0c397 Iustin Pop
5099 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
5100 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
5101 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
5102 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
5103 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
5104 cff90b79 Iustin Pop
5105 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
5106 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
5107 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
5108 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
5109 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
5110 cff90b79 Iustin Pop
      rlist = []
5111 cff90b79 Iustin Pop
      for to_ren in old_lvs:
5112 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
5113 23829f6f Iustin Pop
        if not result.RemoteFailMsg() and result.payload:
5114 23829f6f Iustin Pop
          # device exists
5115 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
5116 cff90b79 Iustin Pop
5117 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
5118 781de953 Iustin Pop
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5119 781de953 Iustin Pop
      result.Raise()
5120 781de953 Iustin Pop
      if not result.data:
5121 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
5122 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
5123 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
5124 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
5125 781de953 Iustin Pop
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5126 781de953 Iustin Pop
      result.Raise()
5127 781de953 Iustin Pop
      if not result.data:
5128 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
5129 cff90b79 Iustin Pop
5130 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
5131 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
5132 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
5133 a9e0c397 Iustin Pop
5134 cff90b79 Iustin Pop
      for disk in old_lvs:
5135 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
5136 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
5137 a9e0c397 Iustin Pop
5138 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
5139 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
5140 4504c3d6 Iustin Pop
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
5141 781de953 Iustin Pop
      if result.failed or not result.data:
5142 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
5143 e1bc0878 Iustin Pop
          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
5144 e1bc0878 Iustin Pop
          if msg:
5145 e1bc0878 Iustin Pop
            warning("Can't rollback device %s: %s", dev, msg,
5146 e1bc0878 Iustin Pop
                    hint="cleanup manually the unused logical volumes")
5147 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
5148 a9e0c397 Iustin Pop
5149 a9e0c397 Iustin Pop
      dev.children = new_lvs
5150 a9e0c397 Iustin Pop
      cfg.Update(instance)
5151 a9e0c397 Iustin Pop
5152 cff90b79 Iustin Pop
    # Step: wait for sync
5153 a9e0c397 Iustin Pop
5154 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
5155 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
5156 a9e0c397 Iustin Pop
    # return value
5157 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
5158 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
5159 a9e0c397 Iustin Pop
5160 a9e0c397 Iustin Pop
    # so check manually all the devices
5161 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5162 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
5163 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
5164 23829f6f Iustin Pop
      msg = result.RemoteFailMsg()
5165 23829f6f Iustin Pop
      if not msg and not result.payload:
5166 23829f6f Iustin Pop
        msg = "disk not found"
5167 23829f6f Iustin Pop
      if msg:
5168 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
5169 23829f6f Iustin Pop
                                 (name, msg))
5170 23829f6f Iustin Pop
      if result.payload[5]:
5171 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
5172 a9e0c397 Iustin Pop
5173 cff90b79 Iustin Pop
    # Step: remove old storage
5174 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
5175 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5176 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
5177 a9e0c397 Iustin Pop
      for lv in old_lvs:
5178 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
5179 e1bc0878 Iustin Pop
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
5180 e1bc0878 Iustin Pop
        if msg:
5181 e1bc0878 Iustin Pop
          warning("Can't remove old LV: %s" % msg,
5182 e1bc0878 Iustin Pop
                  hint="manually remove unused LVs")
5183 a9e0c397 Iustin Pop
          continue
5184 a9e0c397 Iustin Pop
5185 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    @param feedback_fn: function used to send feedback back to the caller
    @raise errors.OpExecError: if any of the per-step checks or the
        network detach on the old node fails

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # iv_names maps disk index -> (disk object, its LVs on the old
    # secondary, the new network-enabled logical_id); filled in step 4
    iv_names = {}
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node
    # DRBD replication runs over the secondary IPs of the involved nodes
    nodes_ip = {
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
      }

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([pri_node, new_node])
    for node in pri_node, new_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d on %s" % (idx, pri_node))
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      # an empty payload means the device simply was not found
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                 (idx, pri_node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, pri_node))
      # ldisk=True: require the local disk of the primary to be
      # consistent, since it will be the only data copy for a while
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      info("adding new local storage on %s for disk/%d" %
           (new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self, new_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step 4: drbd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
                                   instance.name)
    logging.debug("Allocated minors %s" % (minors,))
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the later activation on the primary
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if pri_node == o_node1:
        p_minor = o_minor1
      else:
        p_minor = o_minor2

      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children)
      try:
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
                              _GetInstanceInfoText(instance), False)
      except errors.BlockDeviceError:
        # roll back the minor reservation before propagating the error
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    for idx, dev in enumerate(instance.disks):
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for disk/%d on old node" % idx)
      cfg.SetDiskID(dev, old_node)
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
      if msg:
        # best-effort: a leftover device on the old node is only cosmetic
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
                (idx, msg),
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
                                               instance.disks)[pri_node]

    msg = result.RemoteFailMsg()
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
                                           instance.disks, instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.RemoteFailMsg()
      if msg:
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
                hint="please do a gnt-instance info to see the"
                " status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
                                 (idx, msg))
      # payload[5] is the degraded flag of the device status tuple
      if result.payload[5]:
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      info("remove logical volumes for disk/%d" % idx)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
        if msg:
          # again best-effort: stale LVs on the old node waste space only
          warning("Can't remove LV on old secondary: %s", msg,
                  hint="Cleanup stale volumes by hand")
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    Dispatches to the secondary-change or the disk-only handler; when the
    instance is administratively down, its disks are activated before the
    replacement and shut down again afterwards.

    """
    inst = self.instance

    # Activate the instance disks if we're replacing them on a down instance
    if not inst.admin_up:
      _StartInstanceDisks(self, inst, True)

    if self.op.mode == constants.REPLACE_DISK_CHG:
      result = self._ExecD8Secondary(feedback_fn)
    else:
      result = self._ExecD8DiskOnly(feedback_fn)

    # Deactivate the instance disks if we're replacing them on a down instance
    if not inst.admin_up:
      _SafeShutdownInstanceDisks(self, inst)

    return result
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  The opcode carries the instance name, the index/iv_name of the disk to
  grow, the amount (in MiB) to grow by, and whether to wait for the
  resync to finish.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check syntactic validity of the opcode arguments.

    A non-positive amount would otherwise slip through CheckPrereq (the
    free-space test 'amount > vg_free' is trivially false for negative
    values) and be sent verbatim to the backend.

    """
    if self.op.amount <= 0:
      raise errors.OpPrereqError("Invalid disk growth amount: '%s'" %
                                 self.op.amount)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    template supports growing, and that every involved node is online
    and has enough free space in the volume group.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)

    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo[node]
      if info.failed or not info.data:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.data.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    Grows the disk on all relevant nodes, records the new size in the
    configuration, and optionally waits for the resync to complete.

    """
    instance = self.instance
    disk = self.disk
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Grow request failed to node %s: %s" %
                                 (node, msg))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  With C{static} set, only configuration data is returned; otherwise the
  primary (and, for DRBD, secondary) nodes are queried over RPC for the
  live status of the instance and its block devices.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # this is a read-only operation, so all locks are acquired shared
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      # explicit list given: expand each name and lock only those
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # empty list means "all instances"; resolved later in CheckPrereq
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    # node locks are computed from the locked instances
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" case: the acquired locks are the instance names
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing C{dev}, recursing into its children; the
    C{pstatus}/C{sstatus} entries are the live device status from the
    primary/secondary node, or None when unavailable (static query,
    offline node, or no secondary).

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
      # an offline primary cannot be queried; report no status for it
      if dev_pstatus.offline:
        dev_pstatus = None
      else:
        msg = dev_pstatus.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                   (instance.name, msg))
        dev_pstatus = dev_pstatus.payload
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      # for DRBD the logical_id names both nodes; pick the non-primary one
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
      if dev_sstatus.offline:
        dev_sstatus = None
      else:
        msg = dev_sstatus.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                   (instance.name, msg))
        dev_sstatus = dev_sstatus.payload
    else:
      dev_sstatus = None

    if dev.children:
      # recurse, passing the (possibly updated) secondary node down
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data.

    Returns a dict mapping each instance name to a dict of its
    configuration and (unless static) runtime state.

    """
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise()
        remote_info = remote_info.data
        # presence of a "state" key in the hypervisor answer means running
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        # both the configured and the effective (cluster-filled) parameters
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  Supported changes are NIC and disk add/remove/modify operations and
  per-instance hypervisor (hvparams) and backend (beparams) overrides.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  # only the instance name is mandatory in the opcode; the actual change
  # lists/dicts are optional and defaulted in CheckArguments
  _OP_REQP = ["instance_name"]
  REQ_BGL = False
  def CheckArguments(self):
    """Normalize the opcode and validate its static arguments.

    Missing optional opcode fields (nics, disks, beparams, hvparams,
    force) are filled in with empty defaults, then the requested disk and
    NIC changes are checked for syntactic validity only; checks that need
    cluster state are done later, in CheckPrereq.

    """
    # default all optional opcode fields so later code can rely on them
    if not hasattr(self.op, 'nics'):
      self.op.nics = []
    if not hasattr(self.op, 'disks'):
      self.op.disks = []
    if not hasattr(self.op, 'beparams'):
      self.op.beparams = {}
    if not hasattr(self.op, 'hvparams'):
      self.op.hvparams = {}
    self.op.force = getattr(self.op, "force", False)
    if not (self.op.nics or self.op.disks or
            self.op.hvparams or self.op.beparams):
      raise errors.OpPrereqError("No changes submitted")

    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        # neither add nor remove: must be the numeric index of an
        # existing disk
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index")
      if disk_op == constants.DDM_ADD:
        # a new disk needs a valid access mode and an integer size;
        # setdefault also writes the mode back into the opcode dict
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
        size = disk_dict.get('size', None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing")
        try:
          size = int(size)
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err))
        disk_dict['size'] = size
      else:
        # modification of disk
        if 'size' in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk")

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time")

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index")

      # nic_dict should be a dict
      nic_ip = nic_dict.get('ip', None)
      if nic_ip is not None:
        # the literal string "none" clears the IP address
        if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict['ip'] = None
        else:
          if not utils.IsValidIP(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)

      if nic_op == constants.DDM_ADD:
        # new NICs default to the cluster bridge and an auto MAC; the
        # actual MAC generation happens in CheckPrereq
        nic_bridge = nic_dict.get('bridge', None)
        if nic_bridge is None:
          nic_dict['bridge'] = self.cfg.GetDefBridge()
        nic_mac = nic_dict.get('mac', None)
        if nic_mac is None:
          nic_dict['mac'] = constants.VALUE_AUTO

      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          if not utils.IsValidMac(nic_mac):
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing nic")

    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time")
5743 1a5c7281 Guido Trotter
  def ExpandNames(self):
    """Acquire the instance lock; node locks are computed on demand."""
    self._ExpandAndLockInstance()
    # the node locks cannot be determined yet: ask the LU framework to
    # recalculate (replace) the empty list at locking time
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    self.needed_locks[locking.LEVEL_NODE] = []
  def DeclareLocks(self, level):
    """Replace the empty node lock list with the instance's nodes."""
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.  It relies on
    self.be_new having been computed by CheckPrereq.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      args['nics'] = []
      nic_override = dict(self.op.nics)
      # describe every existing NIC, with requested changes applied on top
      for idx, nic in enumerate(self.instance.nics):
        override = nic_override.get(idx, {})
        nic_ip = override.get('ip', nic.ip)
        nic_bridge = override.get('bridge', nic.bridge)
        nic_mac = override.get('mac', nic.mac)
        args['nics'].append((nic_ip, nic_bridge, nic_mac))
      if constants.DDM_ADD in nic_override:
        # CheckArguments guarantees bridge and mac are filled in for adds
        add_dict = nic_override[constants.DDM_ADD]
        args['nics'].append((add_dict.get('ip', None),
                             add_dict['bridge'],
                             add_dict['mac']))
      elif constants.DDM_REMOVE in nic_override:
        # removal always drops the last NIC
        del args['nics'][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl
  def CheckPrereq(self):
    """Check prerequisites.

    Computes the new hypervisor (hv_new/hv_inst) and backend
    (be_new/be_inst) parameter dicts, verifies free memory on the
    relevant nodes when the memory size changes (unless --force is
    given), and validates the requested NIC and disk changes against the
    current instance and cluster state.  Non-fatal problems are queued
    on self.warn and reported from Exec.

    @raise errors.OpPrereqError: if any requested change is invalid or
        would leave the instance unable to start

    """
    force = self.force = self.op.force

    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # hvparams processing
    if self.op.hvparams:
      i_hvdict = copy.deepcopy(instance.hvparams)
      for key, val in self.op.hvparams.iteritems():
        if val == constants.VALUE_DEFAULT:
          # "default" removes the per-instance override
          try:
            del i_hvdict[key]
          except KeyError:
            pass
        else:
          i_hvdict[key] = val
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
                                i_hvdict)
      # local check
      hypervisor.GetHypervisor(
        instance.hypervisor).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = copy.deepcopy(instance.beparams)
      for key, val in self.op.beparams.iteritems():
        if val == constants.VALUE_DEFAULT:
          # "default" removes the per-instance override
          try:
            del i_bedict[key]
          except KeyError:
            pass
        else:
          i_bedict[key] = val
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                i_bedict)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}

    self.warn = []

    if constants.BE_MEMORY in self.op.beparams and not self.force:
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                         instance.hypervisor)
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s" % pnode)
      else:
        if not instance_info.failed and instance_info.data:
          current_mem = instance_info.data['memory']
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    nodeinfo[pnode].data['memory_free'])
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      if be_new[constants.BE_AUTO_BALANCE]:
        # auto-balanced instances must also fit on their secondaries,
        # but failure there is only a warning
        for node, nres in nodeinfo.iteritems():
          if node not in instance.secondary_nodes:
            continue
          if nres.failed or not isinstance(nres.data, dict):
            self.warn.append("Can't get info from secondary node %s" % node)
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
            self.warn.append("Not enough memory to failover instance to"
                             " secondary node %s" % node)

    # NIC processing
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics)))
      if 'bridge' in nic_dict:
        nic_bridge = nic_dict['bridge']
        if nic_bridge is None:
          raise errors.OpPrereqError('Cannot set the nic bridge to None')
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
          msg = ("Bridge '%s' doesn't exist on one of"
                 " the instance nodes" % nic_bridge)
          if self.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg)
      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac is None:
          raise errors.OpPrereqError('Cannot set the nic mac to None')
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          # otherwise generate the mac
          nic_dict['mac'] = self.cfg.GenerateMAC()
        else:
          # or validate/reserve the current one
          if self.cfg.IsMacInUse(nic_mac):
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % nic_mac)

    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances")
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance")
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
        ins_l = ins_l[pnode]
        if ins_l.failed or not isinstance(ins_l.data, list):
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
        if instance.name in ins_l.data:
          raise errors.OpPrereqError("Instance is running, can't remove"
                                     " disks.")

      # bugfix: this check previously counted instance.nics, which made
      # the disk limit depend on the number of NICs instead of disks
      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks)))

    return
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    @param feedback_fn: callable used to report warnings and progress
    @return: list of (parameter, new value) pairs describing the changes
        actually applied

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        # tear down the block device on every node it lives on; failures
        # are only warnings so the config change still goes through
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
          if msg:
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          # file-based disks live in the directory of the first disk
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          # only the primary node gets the device fully created/opened
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set, by now
        mac = nic_dict['mac']
        bridge = nic_dict['bridge']
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
                              bridge=bridge)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,bridge=%s" %
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
      else:
        # change a given nic
        for key in 'mac', 'ip', 'bridge':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # persist all of the above modifications in one config write
    self.cfg.Update(instance)

    return result
class LUQueryExports(NoHooksLU):
  """Query the per-node list of instance exports.

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    """Share-lock either the requested nodes or all of them."""
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      wanted = _GetWantedNodes(self, self.op.nodes)
    else:
      wanted = locking.ALL_SET
    self.needed_locks[locking.LEVEL_NODE] = wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the node list is whatever the locking layer actually acquired
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node; a node whose RPC call failed maps to False

    """
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node, nres in rpcresult.items():
      if nres.failed:
        result[node] = False
      else:
        result[node] = nres.data
    return result
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  The instance is (optionally) shut down while its disks are
  snapshotted, restarted, and the snapshots are then copied to the
  target node; any older export of the same instance found on other
  nodes is removed afterwards.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have do lock all nodes, as we don't know where
    # the previous export might be, and and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altoghether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)
    _CheckNodeNotDrained(self, self.dst_node.name)

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    The steps are: optionally shut the instance down, snapshot all of
    its disks, restart the instance if it was running, copy the
    snapshots to the target node, remove the snapshots, finalize the
    export and finally remove any older export of this instance found
    on other nodes.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, src_node, msg))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    try:
      for disk in instance.disks:
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if new_dev_name.failed or not new_dev_name.data:
          self.LogWarning("Could not snapshot block device %s on node %s",
                          disk.logical_id[1], src_node)
          # keep a placeholder so disk indexes stay aligned
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name.data),
                                 physical_id=(vgname, new_dev_name.data),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, but only if it
      # was running before (and it was us who shut it down)
      if self.op.shutdown and instance.admin_up:
        result = self.rpc.call_instance_start(src_node, instance)
        msg = result.RemoteFailMsg()
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        if result.failed or not result.data:
          self.LogWarning("Could not export block device %s from node %s to"
                          " node %s", dev.logical_id[1], src_node,
                          dst_node.name)
        # the snapshot is no longer needed once copied (or once the copy
        # failed); remove it in either case
        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
        if msg:
          self.LogWarning("Could not remove snapshot block device %s from node"
                          " %s: %s", dev.logical_id[1], src_node, msg)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          # check the RPC result object like the other call sites do: the
          # result wrapper is always truthy, so a plain "if not ..." test
          # could never detect a failed removal
          result = self.rpc.call_export_remove(node, instance.name)
          if result.failed or not result.data:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)
6260 5c947f38 Iustin Pop
6261 5c947f38 Iustin Pop
6262 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """No prerequisites to check.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node, nresult in exportlist.items():
      if nresult.failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if instance_name not in nresult.data:
        continue
      found = True
      result = self.rpc.call_export_remove(node, instance_name)
      if result.failed or not result.data:
        logging.error("Could not remove export for instance %s"
                      " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
                  " Domain Name.")
6311 9ac99fda Guido Trotter
6312 9ac99fda Guido Trotter
6313 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    """Expand the target name and acquire the matching lock, if any."""
    self.needed_locks = {}
    kind = self.op.kind
    if kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded
    # cluster tags need no locks

  def CheckPrereq(self):
    """Check prerequisites.

    Resolves self.target to the object whose tags are manipulated.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
6350 5c947f38 Iustin Pop
6351 5c947f38 Iustin Pop
6352 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Return the tag list of the target object.

    """
    tags = self.target.GetTags()
    return list(tags)
6364 5c947f38 Iustin Pop
6365 5c947f38 Iustin Pop
6366 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
6367 73415719 Iustin Pop
  """Searches the tags for a given pattern.
6368 73415719 Iustin Pop

6369 73415719 Iustin Pop
  """
6370 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
6371 8646adce Guido Trotter
  REQ_BGL = False
6372 8646adce Guido Trotter
6373 8646adce Guido Trotter
  def ExpandNames(self):
6374 8646adce Guido Trotter
    self.needed_locks = {}
6375 73415719 Iustin Pop
6376 73415719 Iustin Pop
  def CheckPrereq(self):
6377 73415719 Iustin Pop
    """Check prerequisites.
6378 73415719 Iustin Pop

6379 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
6380 73415719 Iustin Pop

6381 73415719 Iustin Pop
    """
6382 73415719 Iustin Pop
    try:
6383 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
6384 73415719 Iustin Pop
    except re.error, err:
6385 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6386 73415719 Iustin Pop
                                 (self.op.pattern, err))
6387 73415719 Iustin Pop
6388 73415719 Iustin Pop
  def Exec(self, feedback_fn):
6389 73415719 Iustin Pop
    """Returns the tag list.
6390 73415719 Iustin Pop

6391 73415719 Iustin Pop
    """
6392 73415719 Iustin Pop
    cfg = self.cfg
6393 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
6394 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
6395 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6396 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
6397 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6398 73415719 Iustin Pop
    results = []
6399 73415719 Iustin Pop
    for path, target in tgts:
6400 73415719 Iustin Pop
      for tag in target.GetTags():
6401 73415719 Iustin Pop
        if self.re.search(tag):
6402 73415719 Iustin Pop
          results.append((path, tag))
6403 73415719 Iustin Pop
    return results
6404 73415719 Iustin Pop
6405 73415719 Iustin Pop
6406 f27302fa Iustin Pop
class LUAddTags(TagsLU):
6407 5c947f38 Iustin Pop
  """Sets a tag on a given object.
6408 5c947f38 Iustin Pop

6409 5c947f38 Iustin Pop
  """
6410 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6411 8646adce Guido Trotter
  REQ_BGL = False
6412 5c947f38 Iustin Pop
6413 5c947f38 Iustin Pop
  def CheckPrereq(self):
6414 5c947f38 Iustin Pop
    """Check prerequisites.
6415 5c947f38 Iustin Pop

6416 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
6417 5c947f38 Iustin Pop

6418 5c947f38 Iustin Pop
    """
6419 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6420 f27302fa Iustin Pop
    for tag in self.op.tags:
6421 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6422 5c947f38 Iustin Pop
6423 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6424 5c947f38 Iustin Pop
    """Sets the tag.
6425 5c947f38 Iustin Pop

6426 5c947f38 Iustin Pop
    """
6427 5c947f38 Iustin Pop
    try:
6428 f27302fa Iustin Pop
      for tag in self.op.tags:
6429 f27302fa Iustin Pop
        self.target.AddTag(tag)
6430 5c947f38 Iustin Pop
    except errors.TagError, err:
6431 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6432 5c947f38 Iustin Pop
    try:
6433 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6434 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6435 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6436 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6437 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6438 5c947f38 Iustin Pop
6439 5c947f38 Iustin Pop
6440 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      # report the missing tags in a deterministic (sorted) order
      diff_names = sorted(["'%s'" % tag for tag in del_tags - cur_tags])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for del_tag in self.op.tags:
      self.target.RemoveTag(del_tag)
    # persist the change; a concurrent config modification aborts the op
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
6477 06009e27 Iustin Pop
6478 0eed6e61 Guido Trotter
6479 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if not self.op.on_nodes:
      return
    # _GetWantedNodes can be used here, but is not always appropriate to use
    # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
    # more information.
    self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
    self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """No prerequisites to check.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      # sleep locally on the master node
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        node_result.Raise()
        if not node_result.data:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result.data))
6524 d61df03e Iustin Pop
6525 d61df03e Iustin Pop
6526 d1c2dd75 Iustin Pop
class IAllocator(object):
6527 d1c2dd75 Iustin Pop
  """IAllocator framework.
6528 d61df03e Iustin Pop

6529 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
6530 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
6531 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
6532 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
6533 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
6534 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
6535 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
6536 d1c2dd75 Iustin Pop
      easy usage
6537 d61df03e Iustin Pop

6538 d61df03e Iustin Pop
  """
6539 29859cb7 Iustin Pop
  _ALLO_KEYS = [
6540 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
6541 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
6542 d1c2dd75 Iustin Pop
    ]
6543 29859cb7 Iustin Pop
  _RELO_KEYS = [
6544 29859cb7 Iustin Pop
    "relocate_from",
6545 29859cb7 Iustin Pop
    ]
6546 d1c2dd75 Iustin Pop
6547 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
    """Initialize the allocator request.

    Validates that the keyword arguments exactly match the key set for
    the requested mode (_ALLO_KEYS or _RELO_KEYS), stores them as
    attributes and builds the input data for the external script.

    """
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # every passed key must belong to the mode's key set
    for key, value in kwargs.items():
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, value)
    # and every key in the set must have been passed
    missing = [key for key in keyset if key not in kwargs]
    if missing:
      raise errors.ProgrammerError("Missing input parameter '%s' to"
                                   " IAllocator" % missing[0])
    self._BuildInputData()
6579 d1c2dd75 Iustin Pop
6580 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
6581 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
6582 d1c2dd75 Iustin Pop

6583 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
6584 d1c2dd75 Iustin Pop

6585 d1c2dd75 Iustin Pop
    """
6586 72737a7f Iustin Pop
    cfg = self.lu.cfg
6587 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
6588 d1c2dd75 Iustin Pop
    # cluster data
6589 d1c2dd75 Iustin Pop
    data = {
6590 77031881 Iustin Pop
      "version": constants.IALLOCATOR_VERSION,
6591 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
6592 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
6593 1325da74 Iustin Pop
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
6594 d1c2dd75 Iustin Pop
      # we don't have job IDs
6595 d61df03e Iustin Pop
      }
6596 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
6597 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
6598 6286519f Iustin Pop
6599 d1c2dd75 Iustin Pop
    # node data
6600 d1c2dd75 Iustin Pop
    node_results = {}
6601 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
6602 8cc7e742 Guido Trotter
6603 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6604 a0add446 Iustin Pop
      hypervisor_name = self.hypervisor
6605 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6606 a0add446 Iustin Pop
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
6607 8cc7e742 Guido Trotter
6608 72737a7f Iustin Pop
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
6609 a0add446 Iustin Pop
                                           hypervisor_name)
6610 18640d69 Guido Trotter
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
6611 18640d69 Guido Trotter
                       cluster_info.enabled_hypervisors)
6612 1325da74 Iustin Pop
    for nname, nresult in node_data.items():
6613 1325da74 Iustin Pop
      # first fill in static (config-based) values
6614 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
6615 d1c2dd75 Iustin Pop
      pnr = {
6616 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
6617 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
6618 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
6619 fc0fe88c Iustin Pop
        "offline": ninfo.offline,
6620 0b2454b9 Iustin Pop
        "drained": ninfo.drained,
6621 1325da74 Iustin Pop
        "master_candidate": ninfo.master_candidate,
6622 d1c2dd75 Iustin Pop
        }
6623 1325da74 Iustin Pop
6624 1325da74 Iustin Pop
      if not ninfo.offline:
6625 1325da74 Iustin Pop
        nresult.Raise()
6626 1325da74 Iustin Pop
        if not isinstance(nresult.data, dict):
6627 1325da74 Iustin Pop
          raise errors.OpExecError("Can't get data for node %s" % nname)
6628 1325da74 Iustin Pop
        remote_info = nresult.data
6629 1325da74 Iustin Pop
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
6630 1325da74 Iustin Pop
                     'vg_size', 'vg_free', 'cpu_total']:
6631 1325da74 Iustin Pop
          if attr not in remote_info:
6632 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' didn't return attribute"
6633 1325da74 Iustin Pop
                                     " '%s'" % (nname, attr))
6634 1325da74 Iustin Pop
          try:
6635 1325da74 Iustin Pop
            remote_info[attr] = int(remote_info[attr])
6636 1325da74 Iustin Pop
          except ValueError, err:
6637 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' returned invalid value"
6638 1325da74 Iustin Pop
                                     " for '%s': %s" % (nname, attr, err))
6639 1325da74 Iustin Pop
        # compute memory used by primary instances
6640 1325da74 Iustin Pop
        i_p_mem = i_p_up_mem = 0
6641 1325da74 Iustin Pop
        for iinfo, beinfo in i_list:
6642 1325da74 Iustin Pop
          if iinfo.primary_node == nname:
6643 1325da74 Iustin Pop
            i_p_mem += beinfo[constants.BE_MEMORY]
6644 1325da74 Iustin Pop
            if iinfo.name not in node_iinfo[nname].data:
6645 1325da74 Iustin Pop
              i_used_mem = 0
6646 1325da74 Iustin Pop
            else:
6647 1325da74 Iustin Pop
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
6648 1325da74 Iustin Pop
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
6649 1325da74 Iustin Pop
            remote_info['memory_free'] -= max(0, i_mem_diff)
6650 1325da74 Iustin Pop
6651 1325da74 Iustin Pop
            if iinfo.admin_up:
6652 1325da74 Iustin Pop
              i_p_up_mem += beinfo[constants.BE_MEMORY]
6653 1325da74 Iustin Pop
6654 1325da74 Iustin Pop
        # compute memory used by instances
6655 1325da74 Iustin Pop
        pnr_dyn = {
6656 1325da74 Iustin Pop
          "total_memory": remote_info['memory_total'],
6657 1325da74 Iustin Pop
          "reserved_memory": remote_info['memory_dom0'],
6658 1325da74 Iustin Pop
          "free_memory": remote_info['memory_free'],
6659 1325da74 Iustin Pop
          "total_disk": remote_info['vg_size'],
6660 1325da74 Iustin Pop
          "free_disk": remote_info['vg_free'],
6661 1325da74 Iustin Pop
          "total_cpus": remote_info['cpu_total'],
6662 1325da74 Iustin Pop
          "i_pri_memory": i_p_mem,
6663 1325da74 Iustin Pop
          "i_pri_up_memory": i_p_up_mem,
6664 1325da74 Iustin Pop
          }
6665 1325da74 Iustin Pop
        pnr.update(pnr_dyn)
6666 1325da74 Iustin Pop
6667 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
6668 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
6669 d1c2dd75 Iustin Pop
6670 d1c2dd75 Iustin Pop
    # instance data
6671 d1c2dd75 Iustin Pop
    instance_data = {}
6672 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
6673 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
6674 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
6675 d1c2dd75 Iustin Pop
      pir = {
6676 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
6677 1325da74 Iustin Pop
        "admin_up": iinfo.admin_up,
6678 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
6679 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
6680 d1c2dd75 Iustin Pop
        "os": iinfo.os,
6681 1325da74 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
6682 d1c2dd75 Iustin Pop
        "nics": nic_data,
6683 1325da74 Iustin Pop
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
6684 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
6685 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
6686 d1c2dd75 Iustin Pop
        }
6687 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
6688 d61df03e Iustin Pop
6689 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
6690 d61df03e Iustin Pop
6691 d1c2dd75 Iustin Pop
    self.in_data = data
6692 d61df03e Iustin Pop
6693 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    # network-mirrored templates need a primary and a secondary node,
    # all other templates live on a single node
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request
6725 298fe380 Iustin Pop
6726 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      # the instance name should have been validated by the caller
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # relocation only makes sense for instances with a secondary node
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    # only the (single) secondary node needs to be chosen anew
    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request
6759 d61df03e Iustin Pop
6760 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
    """Build input data structures.

    Gathers the cluster-wide data first, then attaches the
    mode-specific request section, and finally serializes the whole
    structure into self.in_text.

    """
    self._ComputeClusterData()

    # pick the request builder matching the requested mode
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      add_request_fn = self._AddNewInstance
    else:
      add_request_fn = self._AddRelocateInstance
    add_request_fn()

    self.in_text = serializer.Dump(self.in_data)
6772 d61df03e Iustin Pop
6773 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    @param name: the name of the allocator script to run
    @param validate: whether to run _ValidateResult on the output
    @param call_fn: optional override of the RPC call (used by the
        test code); defaults to the iallocator runner RPC executed on
        the master node

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    # the runner returns a 4-element (rcode, stdout, stderr, fail) tuple
    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()
6797 298fe380 Iustin Pop
6798 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
6799 d1c2dd75 Iustin Pop
    """Process the allocator results.
6800 538475ca Iustin Pop

6801 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
6802 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
6803 538475ca Iustin Pop

6804 d1c2dd75 Iustin Pop
    """
6805 d1c2dd75 Iustin Pop
    try:
6806 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
6807 d1c2dd75 Iustin Pop
    except Exception, err:
6808 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
6809 d1c2dd75 Iustin Pop
6810 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
6811 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
6812 538475ca Iustin Pop
6813 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
6814 d1c2dd75 Iustin Pop
      if key not in rdict:
6815 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
6816 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
6817 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
6818 538475ca Iustin Pop
6819 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
6820 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
6821 d1c2dd75 Iustin Pop
                               " is not a list")
6822 d1c2dd75 Iustin Pop
    self.out_data = rdict
6823 538475ca Iustin Pop
6824 538475ca Iustin Pop
6825 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests, either by computing and returning
  the input that would be fed to an allocator, or by actually running a
  named allocator script and returning its raw output.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocation mode: the opcode must describe a complete new instance
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      # the instance must not already exist in the cluster
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      # each NIC must be a dict carrying at least mac/ip/bridge
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      # each disk must be a dict with an integer size and a valid mode
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      # default the hypervisor to the cluster-wide one if not given
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      # relocation mode: only the (existing) instance name is needed
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      # store the expanded name and the current secondaries for Exec
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    # an allocator name is only required when we actually run one
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    Builds an IAllocator instance from the opcode parameters and
    returns either its serialized input (IALLOCATOR_DIR_IN) or the raw,
    unvalidated output of the named allocator (IALLOCATOR_DIR_OUT).

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      # relocation: self.relocate_from was computed in CheckPrereq
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      # only return the input that would be passed to the allocator
      result = ial.in_text
    else:
      # actually run the allocator; skip result validation since this
      # is a test LU and we want to see the raw output
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result