# lib/cmdlib.py (revision 4be4691d)
#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform
import logging
import copy

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left purely as a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass
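
  # Illustrative sketch (not from the original module): a subclass could use
  # CheckArguments to normalize optional opcode fields; the 'force' attribute
  # below is hypothetical.
  #
  #   def CheckArguments(self):
  #     if not hasattr(self.op, "force"):
  #       self.op.force = False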

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError
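
  # Illustrative sketch (hypothetical concurrent LU, not part of this module):
  # such an LU sets REQ_BGL = False and declares its locks here, sharing the
  # node locks it only needs to read.
  #
  #   def ExpandNames(self):
  #     self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
  #     self.share_locks[locking.LEVEL_NODE] = 1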

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have the 'GANETI_' prefix as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes for one of the lists, an empty list (and not
    None) should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
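
  # Illustrative sketch (hypothetical values): a typical implementation builds
  # the environment dict and returns it together with the pre- and post-hook
  # node lists, e.g. running the hooks on the master node only.
  #
  #   def BuildHooksEnv(self):
  #     env = {"INSTANCE_NAME": self.op.instance_name}
  #     nl = [self.cfg.GetMasterNode()]
  #     return env, nl, nl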

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name
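
  # Illustrative usage sketch: an instance-level LU typically calls this helper
  # from ExpandNames and defers its node locks to DeclareLocks (see
  # _LockInstancesNodes below).
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()
  #     self.needed_locks[locking.LEVEL_NODE] = []
  #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE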

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]
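
  # Illustrative usage sketch, pairing with the ExpandNames pattern shown
  # above: the node level is recalculated only once the instance locks are
  # held.
  #
  #   def DeclareLocks(self, level):
  #     if level == locking.LEVEL_NODE:
  #       self._LockInstancesNodes(primary_only=True)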


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None
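

# A minimal sketch of the LU contract documented above. This class is purely
# illustrative (it is not part of the original module and no opcode maps to
# it); it only shows how a concurrent, hook-less LU fits together.
class _LUExampleNoop(NoHooksLU):
  """No-op LU used only to illustrate the LogicalUnit API.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # nothing in the cluster is touched, so no locks are needed
    self.needed_locks = {}

  def CheckPrereq(self):
    # a no-op has no prerequisites to verify
    pass

  def Exec(self, feedback_fn):
    feedback_fn("illustrative no-op LU: nothing to do")
    return True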


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpProgrammerError: if the nodes parameter is of the wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is of the wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, bridge, mac) representing
      the NICs the instance has
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env
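

# Illustrative sketch (hypothetical instance data): a call such as the one
# below yields the keys OP_TARGET, INSTANCE_NAME, INSTANCE_PRIMARY,
# INSTANCE_SECONDARIES, INSTANCE_OS_TYPE, INSTANCE_STATUS, INSTANCE_MEMORY,
# INSTANCE_VCPUS, INSTANCE_NIC_COUNT and, per NIC, INSTANCE_NIC0_IP,
# INSTANCE_NIC0_BRIDGE and INSTANCE_NIC0_HWADDR.
#
#   _BuildInstanceHookEnv("inst1.example.com", "node1.example.com", [],
#                         "debian-etch", "up", 128, 1,
#                         [("198.51.100.10", "xen-br0", "aa:00:00:11:22:33")])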


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not lu.rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list::

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type node: string
    @param node: the name of the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @type vglist: dict
    @param vglist: dictionary of volume group names and their size
    @param node_result: the results from the node
    @param remote_version: the RPC version from the remote node
    @param feedback_fn: function used to accumulate results

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    if not node_result:
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if (node_current not in node_instance or
          not instance in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad
731 a8083063 Iustin Pop
732 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
733 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
734 2b3b6ddd Guido Trotter

735 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
736 2b3b6ddd Guido Trotter
    was primary for.
737 2b3b6ddd Guido Trotter

738 2b3b6ddd Guido Trotter
    """
739 2b3b6ddd Guido Trotter
    bad = False
740 2b3b6ddd Guido Trotter
741 2b3b6ddd Guido Trotter
    for node, nodeinfo in node_info.iteritems():
742 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
743 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to should a single
744 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
745 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
746 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
747 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
748 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
749 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
750 2b3b6ddd Guido Trotter
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
751 2b3b6ddd Guido Trotter
        needed_mem = 0
752 2b3b6ddd Guido Trotter
        for instance in instances:
753 338e51e8 Iustin Pop
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
754 c0f2b229 Iustin Pop
          if bep[constants.BE_AUTO_BALANCE]:
755 3924700f Iustin Pop
            needed_mem += bep[constants.BE_MEMORY]
756 2b3b6ddd Guido Trotter
        if nodeinfo['mfree'] < needed_mem:
757 2b3b6ddd Guido Trotter
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
758 2b3b6ddd Guido Trotter
                      " failovers should node %s fail" % (node, prinode))
759 2b3b6ddd Guido Trotter
          bad = True
760 2b3b6ddd Guido Trotter
    return bad
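
  # Worked example with hypothetical numbers: if a node is secondary for two
  # auto-balanced instances of 512 and 1024 MB whose primary is the same peer,
  # the check above requires its 'mfree' to be at least 1536 MB, otherwise a
  # failover of that peer could not be accommodated.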

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase, and their failure makes
    the output be logged in the verify output and the verification fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = []
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors)
    all_vglist = self.rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': hypervisors,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    all_rversion = self.rpc.call_version(nodelist)
    all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
                                        self.cfg.GetHypervisorType())

    cluster = self.cfg.GetClusterInfo()
    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary. This is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    return not bad
958 a8083063 Iustin Pop
959 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
960 e4376078 Iustin Pop
    """Analize the post-hooks' result
961 e4376078 Iustin Pop

962 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
963 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
964 d8fff41c Guido Trotter

965 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
966 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
967 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
968 e4376078 Iustin Pop
    @param feedback_fn: function used to send feedback back to the caller
969 e4376078 Iustin Pop
    @param lu_result: previous Exec result
970 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
971 e4376078 Iustin Pop
        and hook results
972 d8fff41c Guido Trotter

973 d8fff41c Guido Trotter
    """
974 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
975 38206f3c Iustin Pop
    # their results
976 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
977 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
978 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
979 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
980 d8fff41c Guido Trotter
      if not hooks_results:
981 d8fff41c Guido Trotter
        feedback_fn("  - ERROR: general communication failure")
982 d8fff41c Guido Trotter
        lu_result = 1
983 d8fff41c Guido Trotter
      else:
984 d8fff41c Guido Trotter
        for node_name in hooks_results:
985 d8fff41c Guido Trotter
          show_node_header = True
986 d8fff41c Guido Trotter
          res = hooks_results[node_name]
987 d8fff41c Guido Trotter
          if res is False or not isinstance(res, list):
988 d8fff41c Guido Trotter
            feedback_fn("    Communication failure")
989 d8fff41c Guido Trotter
            lu_result = 1
990 d8fff41c Guido Trotter
            continue
991 d8fff41c Guido Trotter
          for script, hkr, output in res:
992 d8fff41c Guido Trotter
            if hkr == constants.HKR_FAIL:
993 d8fff41c Guido Trotter
              # The node header is only shown once, if there are
994 d8fff41c Guido Trotter
              # failing hooks on that node
995 d8fff41c Guido Trotter
              if show_node_header:
996 d8fff41c Guido Trotter
                feedback_fn("  Node %s:" % node_name)
997 d8fff41c Guido Trotter
                show_node_header = False
998 d8fff41c Guido Trotter
              feedback_fn("    ERROR: Script %s failed, output:" % script)
999 d8fff41c Guido Trotter
              output = indent_re.sub('      ', output)
1000 d8fff41c Guido Trotter
              feedback_fn("%s" % output)
1001 d8fff41c Guido Trotter
              lu_result = 1
1002 d8fff41c Guido Trotter
1003 d8fff41c Guido Trotter
      return lu_result
1004 d8fff41c Guido Trotter
1005 a8083063 Iustin Pop
1006 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1007 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1008 2c95a8d4 Iustin Pop

1009 2c95a8d4 Iustin Pop
  """
1010 2c95a8d4 Iustin Pop
  _OP_REQP = []
1011 d4b9d97f Guido Trotter
  REQ_BGL = False
1012 d4b9d97f Guido Trotter
1013 d4b9d97f Guido Trotter
  def ExpandNames(self):
1014 d4b9d97f Guido Trotter
    self.needed_locks = {
1015 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1016 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1017 d4b9d97f Guido Trotter
    }
1018 d4b9d97f Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
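    # a value of 1 marks every locking level as shared: this LU only reads
    # cluster state, so other read-only operations can run concurrently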
1019 2c95a8d4 Iustin Pop
1020 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1021 2c95a8d4 Iustin Pop
    """Check prerequisites.
1022 2c95a8d4 Iustin Pop

1023 2c95a8d4 Iustin Pop
    This has no prerequisites.
1024 2c95a8d4 Iustin Pop

1025 2c95a8d4 Iustin Pop
    """
1026 2c95a8d4 Iustin Pop
    pass
1027 2c95a8d4 Iustin Pop
1028 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1029 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1030 2c95a8d4 Iustin Pop

1031 2c95a8d4 Iustin Pop
    """
1032 b63ed789 Iustin Pop
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
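    # note that 'result' is bound to the same four container objects as the
    # res_* names, so filling those in below also fills the returned tuple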
1033 2c95a8d4 Iustin Pop
1034 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1035 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1036 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1037 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1038 2c95a8d4 Iustin Pop
1039 2c95a8d4 Iustin Pop
    nv_dict = {}
1040 2c95a8d4 Iustin Pop
    for inst in instances:
1041 2c95a8d4 Iustin Pop
      inst_lvs = {}
1042 2c95a8d4 Iustin Pop
      if (inst.status != "up" or
1043 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1044 2c95a8d4 Iustin Pop
        continue
1045 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1046 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
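      # e.g. {"inst1": {"node1": ["lv_a", "lv_b"]}} yields the entries
      #   {("node1", "lv_a"): <inst1>, ("node1", "lv_b"): <inst1>}
      # (instance and volume names above are purely illustrative)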
1047 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1048 2c95a8d4 Iustin Pop
        for vol in vol_list:
1049 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1050 2c95a8d4 Iustin Pop
1051 2c95a8d4 Iustin Pop
    if not nv_dict:
1052 2c95a8d4 Iustin Pop
      return result
1053 2c95a8d4 Iustin Pop
1054 72737a7f Iustin Pop
    node_lvs = self.rpc.call_volume_list(nodes, vg_name)
1055 2c95a8d4 Iustin Pop
1056 2c95a8d4 Iustin Pop
    to_act = set()
1057 2c95a8d4 Iustin Pop
    for node in nodes:
1058 2c95a8d4 Iustin Pop
      # node_volume
1059 2c95a8d4 Iustin Pop
      lvs = node_lvs[node]
1060 2c95a8d4 Iustin Pop
1061 b63ed789 Iustin Pop
      if isinstance(lvs, basestring):
1062 9a4f63d1 Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
1063 b63ed789 Iustin Pop
        res_nlvm[node] = lvs
1064 b63ed789 Iustin Pop
      elif not isinstance(lvs, dict):
1065 9a4f63d1 Iustin Pop
        logging.warning("Connection to node %s failed or invalid data"
1066 9a4f63d1 Iustin Pop
                        " returned", node)
1067 2c95a8d4 Iustin Pop
        res_nodes.append(node)
1068 2c95a8d4 Iustin Pop
        continue
1069 2c95a8d4 Iustin Pop
1070 2c95a8d4 Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
1071 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1072 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1073 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1074 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1075 2c95a8d4 Iustin Pop
1076 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1077 b63ed789 Iustin Pop
    # data better
1078 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1079 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1080 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1081 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
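    # res_missing now maps instance names to lists of (node, lv_name) tuples,
    # e.g. {"inst1": [("node1", "lv_data")]} (names are illustrative only)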
1082 b63ed789 Iustin Pop
1083 2c95a8d4 Iustin Pop
    return result
1084 2c95a8d4 Iustin Pop
1085 2c95a8d4 Iustin Pop
1086 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1087 07bd8a51 Iustin Pop
  """Rename the cluster.
1088 07bd8a51 Iustin Pop

1089 07bd8a51 Iustin Pop
  """
1090 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1091 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1092 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1093 07bd8a51 Iustin Pop
1094 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1095 07bd8a51 Iustin Pop
    """Build hooks env.
1096 07bd8a51 Iustin Pop

1097 07bd8a51 Iustin Pop
    """
1098 07bd8a51 Iustin Pop
    env = {
1099 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1100 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1101 07bd8a51 Iustin Pop
      }
1102 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1103 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1104 07bd8a51 Iustin Pop
1105 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1106 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1107 07bd8a51 Iustin Pop

1108 07bd8a51 Iustin Pop
    """
1109 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1110 07bd8a51 Iustin Pop
1111 bcf043c9 Iustin Pop
    new_name = hostname.name
1112 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1113 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
1114 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
1115 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1116 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1117 07bd8a51 Iustin Pop
                                 " cluster has changed")
1118 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1119 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1120 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1121 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1122 07bd8a51 Iustin Pop
                                   new_ip)
1123 07bd8a51 Iustin Pop
1124 07bd8a51 Iustin Pop
    self.op.name = new_name
1125 07bd8a51 Iustin Pop
1126 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1127 07bd8a51 Iustin Pop
    """Rename the cluster.
1128 07bd8a51 Iustin Pop

1129 07bd8a51 Iustin Pop
    """
1130 07bd8a51 Iustin Pop
    clustername = self.op.name
1131 07bd8a51 Iustin Pop
    ip = self.ip
1132 07bd8a51 Iustin Pop
1133 07bd8a51 Iustin Pop
    # shutdown the master IP
1134 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
1135 72737a7f Iustin Pop
    if not self.rpc.call_node_stop_master(master, False):
1136 07bd8a51 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
1137 07bd8a51 Iustin Pop
1138 07bd8a51 Iustin Pop
    try:
1139 07bd8a51 Iustin Pop
      # modify the sstore
1140 d6a02168 Michael Hanselmann
      # TODO: sstore
1141 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_MASTER_IP, ip)
1142 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1143 07bd8a51 Iustin Pop
1144 07bd8a51 Iustin Pop
      # Distribute updated ss config to all nodes
1145 07bd8a51 Iustin Pop
      myself = self.cfg.GetNodeInfo(master)
1146 07bd8a51 Iustin Pop
      dist_nodes = self.cfg.GetNodeList()
1147 07bd8a51 Iustin Pop
      if myself.name in dist_nodes:
1148 07bd8a51 Iustin Pop
        dist_nodes.remove(myself.name)
1149 07bd8a51 Iustin Pop
1150 9a4f63d1 Iustin Pop
      logging.debug("Copying updated ssconf data to all nodes")
1151 07bd8a51 Iustin Pop
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1152 07bd8a51 Iustin Pop
        fname = ss.KeyToFilename(keyname)
1153 72737a7f Iustin Pop
        result = self.rpc.call_upload_file(dist_nodes, fname)
1154 07bd8a51 Iustin Pop
        for to_node in dist_nodes:
1155 07bd8a51 Iustin Pop
          if not result[to_node]:
1156 86d9d3bb Iustin Pop
            self.LogWarning("Copy of file %s to node %s failed",
1157 86d9d3bb Iustin Pop
                            fname, to_node)
1158 07bd8a51 Iustin Pop
    finally:
1159 72737a7f Iustin Pop
      if not self.rpc.call_node_start_master(master, False):
1160 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
1161 86d9d3bb Iustin Pop
                        " the master, please restart manually.")
1162 07bd8a51 Iustin Pop
1163 07bd8a51 Iustin Pop
1164 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1165 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1166 8084f9f6 Manuel Franceschini

1167 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
1168 e4376078 Iustin Pop
  @param disk: the disk to check
1169 e4376078 Iustin Pop
  @rtype: boolean
1170 e4376078 Iustin Pop
  @return: boolean indicating whether an LD_LV dev_type was found or not
1171 8084f9f6 Manuel Franceschini

1172 8084f9f6 Manuel Franceschini
  """
1173 8084f9f6 Manuel Franceschini
  if disk.children:
1174 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1175 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1176 8084f9f6 Manuel Franceschini
        return True
1177 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
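# Illustrative example: a DRBD-backed disk whose children are logical volumes
# makes _RecursiveCheckIfLVMBased return True via the recursion into its
# children, while a purely file-based disk returns False.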
1178 8084f9f6 Manuel Franceschini
1179 8084f9f6 Manuel Franceschini
1180 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1181 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1182 8084f9f6 Manuel Franceschini

1183 8084f9f6 Manuel Franceschini
  """
1184 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1185 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1186 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1187 c53279cf Guido Trotter
  REQ_BGL = False
1188 c53279cf Guido Trotter
1189 c53279cf Guido Trotter
  def ExpandNames(self):
1190 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
1191 c53279cf Guido Trotter
    # all nodes to be modified.
1192 c53279cf Guido Trotter
    self.needed_locks = {
1193 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1194 c53279cf Guido Trotter
    }
1195 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1196 8084f9f6 Manuel Franceschini
1197 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1198 8084f9f6 Manuel Franceschini
    """Build hooks env.
1199 8084f9f6 Manuel Franceschini

1200 8084f9f6 Manuel Franceschini
    """
1201 8084f9f6 Manuel Franceschini
    env = {
1202 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1203 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1204 8084f9f6 Manuel Franceschini
      }
1205 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1206 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1207 8084f9f6 Manuel Franceschini
1208 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1209 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1210 8084f9f6 Manuel Franceschini

1211 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1212 5f83e263 Iustin Pop
    if the given volume group is valid.
1213 8084f9f6 Manuel Franceschini

1214 8084f9f6 Manuel Franceschini
    """
1215 c53279cf Guido Trotter
    # FIXME: This only works because there is only one parameter that can be
1216 c53279cf Guido Trotter
    # changed or removed.
1217 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
1218 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1219 8084f9f6 Manuel Franceschini
      for inst in instances:
1220 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1221 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1222 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1223 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1224 8084f9f6 Manuel Franceschini
1225 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1226 779c15bb Iustin Pop
1227 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1228 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1229 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
1230 8084f9f6 Manuel Franceschini
      for node in node_list:
1231 8d1a2a64 Michael Hanselmann
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
1232 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1233 8084f9f6 Manuel Franceschini
        if vgstatus:
1234 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1235 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1236 8084f9f6 Manuel Franceschini
1237 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
1238 779c15bb Iustin Pop
    # beparams changes do not need validation (we can't validate?),
1239 779c15bb Iustin Pop
    # but we still process here
1240 779c15bb Iustin Pop
    if self.op.beparams:
1241 779c15bb Iustin Pop
      self.new_beparams = cluster.FillDict(
1242 779c15bb Iustin Pop
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
1243 779c15bb Iustin Pop
1244 779c15bb Iustin Pop
    # hypervisor list/parameters
1245 779c15bb Iustin Pop
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
1246 779c15bb Iustin Pop
    if self.op.hvparams:
1247 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
1248 779c15bb Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1249 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
1250 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
1251 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
1252 779c15bb Iustin Pop
        else:
1253 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
1254 779c15bb Iustin Pop
1255 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1256 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
1257 779c15bb Iustin Pop
    else:
1258 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
1259 779c15bb Iustin Pop
1260 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1261 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
1262 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
1263 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1264 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
1265 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
1266 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
1267 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
1268 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
1269 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
1270 779c15bb Iustin Pop
1271 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1272 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1273 8084f9f6 Manuel Franceschini

1274 8084f9f6 Manuel Franceschini
    """
1275 779c15bb Iustin Pop
    if self.op.vg_name is not None:
1276 779c15bb Iustin Pop
      if self.op.vg_name != self.cfg.GetVGName():
1277 779c15bb Iustin Pop
        self.cfg.SetVGName(self.op.vg_name)
1278 779c15bb Iustin Pop
      else:
1279 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
1280 779c15bb Iustin Pop
                    " state, not changing")
1281 779c15bb Iustin Pop
    if self.op.hvparams:
1282 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
1283 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1284 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1285 779c15bb Iustin Pop
    if self.op.beparams:
1286 779c15bb Iustin Pop
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
1287 779c15bb Iustin Pop
    self.cfg.Update(self.cluster)
1288 8084f9f6 Manuel Franceschini
1289 8084f9f6 Manuel Franceschini
1290 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1291 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1292 a8083063 Iustin Pop

1293 a8083063 Iustin Pop
  """
1294 a8083063 Iustin Pop
  if not instance.disks:
1295 a8083063 Iustin Pop
    return True
1296 a8083063 Iustin Pop
1297 a8083063 Iustin Pop
  if not oneshot:
1298 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1299 a8083063 Iustin Pop
1300 a8083063 Iustin Pop
  node = instance.primary_node
1301 a8083063 Iustin Pop
1302 a8083063 Iustin Pop
  for dev in instance.disks:
1303 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
1304 a8083063 Iustin Pop
1305 a8083063 Iustin Pop
  retries = 0
1306 a8083063 Iustin Pop
  while True:
1307 a8083063 Iustin Pop
    max_time = 0
1308 a8083063 Iustin Pop
    done = True
1309 a8083063 Iustin Pop
    cumul_degraded = False
1310 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1311 a8083063 Iustin Pop
    if not rstats:
1312 86d9d3bb Iustin Pop
      lu.LogWarning("Can't get any data from node %s", node)
1313 a8083063 Iustin Pop
      retries += 1
1314 a8083063 Iustin Pop
      if retries >= 10:
1315 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1316 3ecf6786 Iustin Pop
                                 " aborting." % node)
1317 a8083063 Iustin Pop
      time.sleep(6)
1318 a8083063 Iustin Pop
      continue
1319 a8083063 Iustin Pop
    retries = 0
1320 a8083063 Iustin Pop
    for i in range(len(rstats)):
1321 a8083063 Iustin Pop
      mstat = rstats[i]
1322 a8083063 Iustin Pop
      if mstat is None:
1323 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
1324 86d9d3bb Iustin Pop
                           node, instance.disks[i].iv_name)
1325 a8083063 Iustin Pop
        continue
1326 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1327 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1328 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1329 a8083063 Iustin Pop
      if perc_done is not None:
1330 a8083063 Iustin Pop
        done = False
1331 a8083063 Iustin Pop
        if est_time is not None:
1332 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1333 a8083063 Iustin Pop
          max_time = est_time
1334 a8083063 Iustin Pop
        else:
1335 a8083063 Iustin Pop
          rem_time = "no time estimate"
1336 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1337 b9bddb6b Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
1338 a8083063 Iustin Pop
    if done or oneshot:
1339 a8083063 Iustin Pop
      break
1340 a8083063 Iustin Pop
1341 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1342 a8083063 Iustin Pop
1343 a8083063 Iustin Pop
  if done:
1344 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1345 a8083063 Iustin Pop
  return not cumul_degraded
1346 a8083063 Iustin Pop
1347 a8083063 Iustin Pop
1348 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1349 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1350 a8083063 Iustin Pop

1351 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1352 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1353 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1354 0834c866 Iustin Pop

1355 a8083063 Iustin Pop
  """
1356 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1357 0834c866 Iustin Pop
  if ldisk:
1358 0834c866 Iustin Pop
    idx = 6
1359 0834c866 Iustin Pop
  else:
1360 0834c866 Iustin Pop
    idx = 5
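  # index into the status tuple returned by call_blockdev_find: position 5
  # holds the overall is_degraded flag, position 6 the local-disk (ldisk)
  # status, matching the two modes described in the docstring above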
1361 a8083063 Iustin Pop
1362 a8083063 Iustin Pop
  result = True
1363 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1364 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1365 a8083063 Iustin Pop
    if not rstats:
1366 9a4f63d1 Iustin Pop
      logging.warning("Node %s: disk degraded, not found or node down", node)
1367 a8083063 Iustin Pop
      result = False
1368 a8083063 Iustin Pop
    else:
1369 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1370 a8083063 Iustin Pop
  if dev.children:
1371 a8083063 Iustin Pop
    for child in dev.children:
1372 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1373 a8083063 Iustin Pop
1374 a8083063 Iustin Pop
  return result
1375 a8083063 Iustin Pop
1376 a8083063 Iustin Pop
1377 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1378 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1379 a8083063 Iustin Pop

1380 a8083063 Iustin Pop
  """
1381 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1382 6bf01bbb Guido Trotter
  REQ_BGL = False
1383 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
1384 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")
1385 a8083063 Iustin Pop
1386 6bf01bbb Guido Trotter
  def ExpandNames(self):
1387 1f9430d6 Iustin Pop
    if self.op.names:
1388 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1389 1f9430d6 Iustin Pop
1390 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1391 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1392 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1393 1f9430d6 Iustin Pop
1394 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
1395 6bf01bbb Guido Trotter
    self.needed_locks = {}
1396 6bf01bbb Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1397 e310b019 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1398 6bf01bbb Guido Trotter
1399 6bf01bbb Guido Trotter
  def CheckPrereq(self):
1400 6bf01bbb Guido Trotter
    """Check prerequisites.
1401 6bf01bbb Guido Trotter

1402 6bf01bbb Guido Trotter
    """
1403 6bf01bbb Guido Trotter
1404 1f9430d6 Iustin Pop
  @staticmethod
1405 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1406 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1407 1f9430d6 Iustin Pop

1408 e4376078 Iustin Pop
    @param node_list: a list with the names of all nodes
1409 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
1410 1f9430d6 Iustin Pop

1411 e4376078 Iustin Pop
    @rtype: dict
1412 e4376078 Iustin Pop
    @return: a dictionary with OS names as keys and as value another map, with
1413 e4376078 Iustin Pop
        nodes as keys and lists of OS objects as values, e.g.::
1414 e4376078 Iustin Pop

1415 e4376078 Iustin Pop
          {"debian-etch": {"node1": [<object>,...],
1416 e4376078 Iustin Pop
                           "node2": [<object>,]}
1417 e4376078 Iustin Pop
          }
1418 1f9430d6 Iustin Pop

1419 1f9430d6 Iustin Pop
    """
1420 1f9430d6 Iustin Pop
    all_os = {}
1421 1f9430d6 Iustin Pop
    for node_name, nr in rlist.iteritems():
1422 1f9430d6 Iustin Pop
      if not nr:
1423 1f9430d6 Iustin Pop
        continue
1424 b4de68a9 Iustin Pop
      for os_obj in nr:
1425 b4de68a9 Iustin Pop
        if os_obj.name not in all_os:
1426 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1427 1f9430d6 Iustin Pop
          # for each node in node_list
1428 b4de68a9 Iustin Pop
          all_os[os_obj.name] = {}
1429 1f9430d6 Iustin Pop
          for nname in node_list:
1430 b4de68a9 Iustin Pop
            all_os[os_obj.name][nname] = []
1431 b4de68a9 Iustin Pop
        all_os[os_obj.name][node_name].append(os_obj)
1432 1f9430d6 Iustin Pop
    return all_os
1433 a8083063 Iustin Pop
1434 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1435 a8083063 Iustin Pop
    """Compute the list of OSes.
1436 a8083063 Iustin Pop

1437 a8083063 Iustin Pop
    """
1438 6bf01bbb Guido Trotter
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1439 72737a7f Iustin Pop
    node_data = self.rpc.call_os_diagnose(node_list)
1440 a8083063 Iustin Pop
    if node_data == False:
1441 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1442 1f9430d6 Iustin Pop
    pol = self._DiagnoseByOS(node_list, node_data)
1443 1f9430d6 Iustin Pop
    output = []
1444 1f9430d6 Iustin Pop
    for os_name, os_data in pol.iteritems():
1445 1f9430d6 Iustin Pop
      row = []
1446 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1447 1f9430d6 Iustin Pop
        if field == "name":
1448 1f9430d6 Iustin Pop
          val = os_name
1449 1f9430d6 Iustin Pop
        elif field == "valid":
1450 1f9430d6 Iustin Pop
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1451 1f9430d6 Iustin Pop
        elif field == "node_status":
1452 1f9430d6 Iustin Pop
          val = {}
1453 1f9430d6 Iustin Pop
          for node_name, nos_list in os_data.iteritems():
1454 1f9430d6 Iustin Pop
            val[node_name] = [(v.status, v.path) for v in nos_list]
1455 1f9430d6 Iustin Pop
        else:
1456 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1457 1f9430d6 Iustin Pop
        row.append(val)
1458 1f9430d6 Iustin Pop
      output.append(row)
1459 1f9430d6 Iustin Pop
1460 1f9430d6 Iustin Pop
    return output
1461 a8083063 Iustin Pop
1462 a8083063 Iustin Pop
1463 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1464 a8083063 Iustin Pop
  """Logical unit for removing a node.
1465 a8083063 Iustin Pop

1466 a8083063 Iustin Pop
  """
1467 a8083063 Iustin Pop
  HPATH = "node-remove"
1468 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1469 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1470 a8083063 Iustin Pop
1471 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1472 a8083063 Iustin Pop
    """Build hooks env.
1473 a8083063 Iustin Pop

1474 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1475 d08869ee Guido Trotter
    node would then be impossible to remove.
1476 a8083063 Iustin Pop

1477 a8083063 Iustin Pop
    """
1478 396e1b78 Michael Hanselmann
    env = {
1479 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1480 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1481 396e1b78 Michael Hanselmann
      }
1482 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1483 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1484 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1485 a8083063 Iustin Pop
1486 a8083063 Iustin Pop
  def CheckPrereq(self):
1487 a8083063 Iustin Pop
    """Check prerequisites.
1488 a8083063 Iustin Pop

1489 a8083063 Iustin Pop
    This checks:
1490 a8083063 Iustin Pop
     - the node exists in the configuration
1491 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1492 a8083063 Iustin Pop
     - it's not the master
1493 a8083063 Iustin Pop

1494 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1495 a8083063 Iustin Pop

1496 a8083063 Iustin Pop
    """
1497 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1498 a8083063 Iustin Pop
    if node is None:
1499 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1500 a8083063 Iustin Pop
1501 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1502 a8083063 Iustin Pop
1503 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
1504 a8083063 Iustin Pop
    if node.name == masternode:
1505 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1506 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1507 a8083063 Iustin Pop
1508 a8083063 Iustin Pop
    for instance_name in instance_list:
1509 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1510 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1511 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1512 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1513 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1514 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1515 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1516 a8083063 Iustin Pop
    self.op.node_name = node.name
1517 a8083063 Iustin Pop
    self.node = node
1518 a8083063 Iustin Pop
1519 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1520 a8083063 Iustin Pop
    """Removes the node from the cluster.
1521 a8083063 Iustin Pop

1522 a8083063 Iustin Pop
    """
1523 a8083063 Iustin Pop
    node = self.node
1524 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
1525 9a4f63d1 Iustin Pop
                 node.name)
1526 a8083063 Iustin Pop
1527 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
1528 a8083063 Iustin Pop
1529 72737a7f Iustin Pop
    self.rpc.call_node_leave_cluster(node.name)
1530 c8a0948f Michael Hanselmann
1531 a8083063 Iustin Pop
1532 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1533 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1534 a8083063 Iustin Pop

1535 a8083063 Iustin Pop
  """
1536 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1537 35705d8f Guido Trotter
  REQ_BGL = False
1538 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
1539 31bf511f Iustin Pop
    "dtotal", "dfree",
1540 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
1541 31bf511f Iustin Pop
    "bootid",
1542 31bf511f Iustin Pop
    "ctotal",
1543 31bf511f Iustin Pop
    )
1544 31bf511f Iustin Pop
1545 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(
1546 31bf511f Iustin Pop
    "name", "pinst_cnt", "sinst_cnt",
1547 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
1548 31bf511f Iustin Pop
    "pip", "sip", "tags",
1549 31bf511f Iustin Pop
    "serial_no",
1550 31bf511f Iustin Pop
    )
1551 a8083063 Iustin Pop
1552 35705d8f Guido Trotter
  def ExpandNames(self):
1553 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1554 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1555 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1556 a8083063 Iustin Pop
1557 35705d8f Guido Trotter
    self.needed_locks = {}
1558 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1559 c8d8b4c8 Iustin Pop
1560 c8d8b4c8 Iustin Pop
    if self.op.names:
1561 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
1562 35705d8f Guido Trotter
    else:
1563 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
1564 c8d8b4c8 Iustin Pop
1565 31bf511f Iustin Pop
    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
1566 c8d8b4c8 Iustin Pop
    if self.do_locking:
1567 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
1568 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
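      # e.g. querying only "name,pinst_cnt" (static fields) needs no node
      # locks at all, while querying "mfree" or "dtotal" (dynamic fields)
      # locks the nodes so that live data can be gathered in Exec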
1569 c8d8b4c8 Iustin Pop
1570 35705d8f Guido Trotter
1571 35705d8f Guido Trotter
  def CheckPrereq(self):
1572 35705d8f Guido Trotter
    """Check prerequisites.
1573 35705d8f Guido Trotter

1574 35705d8f Guido Trotter
    """
1575 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in the _GetWantedNodes,
1576 c8d8b4c8 Iustin Pop
    # if non empty, and if empty, there's no validation to do
1577 c8d8b4c8 Iustin Pop
    pass
1578 a8083063 Iustin Pop
1579 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1580 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1581 a8083063 Iustin Pop

1582 a8083063 Iustin Pop
    """
1583 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
1584 c8d8b4c8 Iustin Pop
    if self.do_locking:
1585 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
1586 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
1587 3fa93523 Guido Trotter
      nodenames = self.wanted
1588 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
1589 3fa93523 Guido Trotter
      if missing:
1590 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
1591 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
1592 c8d8b4c8 Iustin Pop
    else:
1593 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
1594 c1f1cbb2 Iustin Pop
1595 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
1596 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
1597 a8083063 Iustin Pop
1598 a8083063 Iustin Pop
    # begin data gathering
1599 a8083063 Iustin Pop
1600 31bf511f Iustin Pop
    if self.do_locking:
1601 a8083063 Iustin Pop
      live_data = {}
1602 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
1603 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
1604 a8083063 Iustin Pop
      for name in nodenames:
1605 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1606 a8083063 Iustin Pop
        if nodeinfo:
1607 a8083063 Iustin Pop
          live_data[name] = {
1608 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1609 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1610 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1611 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1612 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1613 e8a4c138 Iustin Pop
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
1614 3ef10550 Michael Hanselmann
            "bootid": nodeinfo['bootid'],
1615 a8083063 Iustin Pop
            }
1616 a8083063 Iustin Pop
        else:
1617 a8083063 Iustin Pop
          live_data[name] = {}
1618 a8083063 Iustin Pop
    else:
1619 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
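      # only static fields were requested in this branch (otherwise
      # do_locking would have been set in ExpandNames), so these empty
      # placeholder dicts are never actually consulted below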
1620 a8083063 Iustin Pop
1621 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1622 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
1623 a8083063 Iustin Pop
1624 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1625 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1626 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1627 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1628 a8083063 Iustin Pop
1629 ec223efb Iustin Pop
      for instance_name in instancelist:
1630 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1631 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1632 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1633 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1634 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1635 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1636 a8083063 Iustin Pop
1637 a8083063 Iustin Pop
    # end data gathering
1638 a8083063 Iustin Pop
1639 a8083063 Iustin Pop
    output = []
1640 a8083063 Iustin Pop
    for node in nodelist:
1641 a8083063 Iustin Pop
      node_output = []
1642 a8083063 Iustin Pop
      for field in self.op.output_fields:
1643 a8083063 Iustin Pop
        if field == "name":
1644 a8083063 Iustin Pop
          val = node.name
1645 ec223efb Iustin Pop
        elif field == "pinst_list":
1646 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1647 ec223efb Iustin Pop
        elif field == "sinst_list":
1648 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1649 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1650 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1651 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1652 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1653 a8083063 Iustin Pop
        elif field == "pip":
1654 a8083063 Iustin Pop
          val = node.primary_ip
1655 a8083063 Iustin Pop
        elif field == "sip":
1656 a8083063 Iustin Pop
          val = node.secondary_ip
1657 130a6a6f Iustin Pop
        elif field == "tags":
1658 130a6a6f Iustin Pop
          val = list(node.GetTags())
1659 38d7239a Iustin Pop
        elif field == "serial_no":
1660 38d7239a Iustin Pop
          val = node.serial_no
1661 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
1662 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1663 a8083063 Iustin Pop
        else:
1664 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1665 a8083063 Iustin Pop
        node_output.append(val)
1666 a8083063 Iustin Pop
      output.append(node_output)
1667 a8083063 Iustin Pop
1668 a8083063 Iustin Pop
    return output
1669 a8083063 Iustin Pop
1670 a8083063 Iustin Pop
1671 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1672 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1673 dcb93971 Michael Hanselmann

1674 dcb93971 Michael Hanselmann
  """
1675 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1676 21a15682 Guido Trotter
  REQ_BGL = False
1677 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
1678 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
1679 21a15682 Guido Trotter
1680 21a15682 Guido Trotter
  def ExpandNames(self):
1681 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1682 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1683 21a15682 Guido Trotter
                       selected=self.op.output_fields)
1684 21a15682 Guido Trotter
1685 21a15682 Guido Trotter
    self.needed_locks = {}
1686 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1687 21a15682 Guido Trotter
    if not self.op.nodes:
1688 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1689 21a15682 Guido Trotter
    else:
1690 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
1691 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
1692 dcb93971 Michael Hanselmann
1693 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1694 dcb93971 Michael Hanselmann
    """Check prerequisites.
1695 dcb93971 Michael Hanselmann

1696 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1697 dcb93971 Michael Hanselmann

1698 dcb93971 Michael Hanselmann
    """
1699 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
1700 dcb93971 Michael Hanselmann
1701 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1702 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1703 dcb93971 Michael Hanselmann

1704 dcb93971 Michael Hanselmann
    """
1705 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1706 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
1707 dcb93971 Michael Hanselmann
1708 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1709 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1710 dcb93971 Michael Hanselmann
1711 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1712 dcb93971 Michael Hanselmann
1713 dcb93971 Michael Hanselmann
    output = []
1714 dcb93971 Michael Hanselmann
    for node in nodenames:
1715 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1716 37d19eb2 Michael Hanselmann
        continue
1717 37d19eb2 Michael Hanselmann
1718 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1719 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1720 dcb93971 Michael Hanselmann
1721 dcb93971 Michael Hanselmann
      for vol in node_vols:
1722 dcb93971 Michael Hanselmann
        node_output = []
1723 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1724 dcb93971 Michael Hanselmann
          if field == "node":
1725 dcb93971 Michael Hanselmann
            val = node
1726 dcb93971 Michael Hanselmann
          elif field == "phys":
1727 dcb93971 Michael Hanselmann
            val = vol['dev']
1728 dcb93971 Michael Hanselmann
          elif field == "vg":
1729 dcb93971 Michael Hanselmann
            val = vol['vg']
1730 dcb93971 Michael Hanselmann
          elif field == "name":
1731 dcb93971 Michael Hanselmann
            val = vol['name']
1732 dcb93971 Michael Hanselmann
          elif field == "size":
1733 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1734 dcb93971 Michael Hanselmann
          elif field == "instance":
1735 dcb93971 Michael Hanselmann
            for inst in ilist:
1736 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1737 dcb93971 Michael Hanselmann
                continue
1738 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1739 dcb93971 Michael Hanselmann
                val = inst.name
1740 dcb93971 Michael Hanselmann
                break
1741 dcb93971 Michael Hanselmann
            else:
1742 dcb93971 Michael Hanselmann
              val = '-'
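              # the for/else above falls back to '-' only when no instance
              # owns this logical volume on the node, i.e. the loop finished
              # without hitting the break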
1743 dcb93971 Michael Hanselmann
          else:
1744 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1745 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1746 dcb93971 Michael Hanselmann
1747 dcb93971 Michael Hanselmann
        output.append(node_output)
1748 dcb93971 Michael Hanselmann
1749 dcb93971 Michael Hanselmann
    return output
1750 dcb93971 Michael Hanselmann
1751 dcb93971 Michael Hanselmann
1752 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1753 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1754 a8083063 Iustin Pop

1755 a8083063 Iustin Pop
  """
1756 a8083063 Iustin Pop
  HPATH = "node-add"
1757 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1758 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1759 a8083063 Iustin Pop
1760 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1761 a8083063 Iustin Pop
    """Build hooks env.
1762 a8083063 Iustin Pop

1763 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1764 a8083063 Iustin Pop

1765 a8083063 Iustin Pop
    """
1766 a8083063 Iustin Pop
    env = {
1767 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1768 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1769 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1770 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1771 a8083063 Iustin Pop
      }
1772 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1773 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1774 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1775 a8083063 Iustin Pop
1776 a8083063 Iustin Pop
  def CheckPrereq(self):
1777 a8083063 Iustin Pop
    """Check prerequisites.
1778 a8083063 Iustin Pop

1779 a8083063 Iustin Pop
    This checks:
1780 a8083063 Iustin Pop
     - the new node is not already in the config
1781 a8083063 Iustin Pop
     - it is resolvable
1782 a8083063 Iustin Pop
     - its parameters (single/dual homed) match the cluster
1783 a8083063 Iustin Pop

1784 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1785 a8083063 Iustin Pop

1786 a8083063 Iustin Pop
    """
1787 a8083063 Iustin Pop
    node_name = self.op.node_name
1788 a8083063 Iustin Pop
    cfg = self.cfg
1789 a8083063 Iustin Pop
1790 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1791 a8083063 Iustin Pop
1792 bcf043c9 Iustin Pop
    node = dns_data.name
1793 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1794 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1795 a8083063 Iustin Pop
    if secondary_ip is None:
1796 a8083063 Iustin Pop
      secondary_ip = primary_ip
1797 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1798 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1799 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1800 e7c6e02b Michael Hanselmann
1801 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1802 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
1803 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1804 e7c6e02b Michael Hanselmann
                                 node)
1805 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
1806 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1807 a8083063 Iustin Pop
1808 a8083063 Iustin Pop
    for existing_node_name in node_list:
1809 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1810 e7c6e02b Michael Hanselmann
1811 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
1812 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
1813 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
1814 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1815 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
1816 e7c6e02b Michael Hanselmann
        continue
1817 e7c6e02b Michael Hanselmann
1818 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1819 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1820 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1821 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1822 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1823 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1824 a8083063 Iustin Pop
1825 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1826 a8083063 Iustin Pop
    # same as for the master
1827 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
1828 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1829 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1830 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1831 a8083063 Iustin Pop
      if master_singlehomed:
1832 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1833 3ecf6786 Iustin Pop
                                   " new node has one")
1834 a8083063 Iustin Pop
      else:
1835 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1836 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1837 a8083063 Iustin Pop
1838 a8083063 Iustin Pop
    # checks reachability
1839 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1840 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1841 a8083063 Iustin Pop
1842 a8083063 Iustin Pop
    if not newbie_singlehomed:
1843 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1844 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1845 b15d625f Iustin Pop
                           source=myself.secondary_ip):
1846 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1847 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
1848 a8083063 Iustin Pop
1849 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1850 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1851 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1852 a8083063 Iustin Pop
1853 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1854 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1855 a8083063 Iustin Pop

1856 a8083063 Iustin Pop
    """
1857 a8083063 Iustin Pop
    new_node = self.new_node
1858 a8083063 Iustin Pop
    node = new_node.name
1859 a8083063 Iustin Pop
1860 a8083063 Iustin Pop
    # check connectivity
1861 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
1862 a8083063 Iustin Pop
    if result:
1863 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1864 9a4f63d1 Iustin Pop
        logging.info("Communication to node %s fine, sw version %s match",
1865 9a4f63d1 Iustin Pop
                     node, result)
1866 a8083063 Iustin Pop
      else:
1867 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1868 3ecf6786 Iustin Pop
                                 " node version %s" %
1869 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1870 a8083063 Iustin Pop
    else:
1871 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1872 a8083063 Iustin Pop
1873 a8083063 Iustin Pop
    # setup ssh on node
1874 9a4f63d1 Iustin Pop
    logging.info("Copy ssh key to node %s", node)
1875 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1876 a8083063 Iustin Pop
    keyarray = []
1877 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1878 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1879 70d9e3d8 Iustin Pop
                priv_key, pub_key]
1880 a8083063 Iustin Pop
1881 a8083063 Iustin Pop
    for i in keyfiles:
1882 a8083063 Iustin Pop
      f = open(i, 'r')
1883 a8083063 Iustin Pop
      try:
1884 a8083063 Iustin Pop
        keyarray.append(f.read())
1885 a8083063 Iustin Pop
      finally:
1886 a8083063 Iustin Pop
        f.close()
1887 a8083063 Iustin Pop
1888 72737a7f Iustin Pop
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
1889 72737a7f Iustin Pop
                                    keyarray[2],
1890 72737a7f Iustin Pop
                                    keyarray[3], keyarray[4], keyarray[5])
1891 a8083063 Iustin Pop
1892 a8083063 Iustin Pop
    if not result:
1893 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1894 a8083063 Iustin Pop
1895 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1896 d9c02ca6 Michael Hanselmann
    utils.AddHostToEtcHosts(new_node.name)
1897 c8a0948f Michael Hanselmann
1898 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1899 caad16e2 Iustin Pop
      if not self.rpc.call_node_has_ip_address(new_node.name,
1900 caad16e2 Iustin Pop
                                               new_node.secondary_ip):
1901 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1902 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
1903 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
1904 a8083063 Iustin Pop
1905 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
1906 5c0527ed Guido Trotter
    node_verify_param = {
1907 5c0527ed Guido Trotter
      'nodelist': [node],
1908 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
1909 5c0527ed Guido Trotter
    }
1910 5c0527ed Guido Trotter
1911 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
1912 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
1913 5c0527ed Guido Trotter
    for verifier in node_verify_list:
1914 5c0527ed Guido Trotter
      if not result[verifier]:
1915 5c0527ed Guido Trotter
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
1916 5c0527ed Guido Trotter
                                 " for remote verification" % verifier)
1917 5c0527ed Guido Trotter
      if result[verifier]['nodelist']:
1918 5c0527ed Guido Trotter
        for failed in result[verifier]['nodelist']:
1919 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
1920 5c0527ed Guido Trotter
                      (verifier, result[verifier]['nodelist'][failed]))
1921 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
1922 ff98055b Iustin Pop
1923 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1924 a8083063 Iustin Pop
    # including the node just added
1925 d6a02168 Michael Hanselmann
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
1926 102b115b Michael Hanselmann
    dist_nodes = self.cfg.GetNodeList()
1927 102b115b Michael Hanselmann
    if not self.op.readd:
1928 102b115b Michael Hanselmann
      dist_nodes.append(node)
1929 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1930 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1931 a8083063 Iustin Pop
1932 9a4f63d1 Iustin Pop
    logging.debug("Copying hosts and known_hosts to all nodes")
1933 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1934 72737a7f Iustin Pop
      result = self.rpc.call_upload_file(dist_nodes, fname)
1935 a8083063 Iustin Pop
      for to_node in dist_nodes:
1936 a8083063 Iustin Pop
        if not result[to_node]:
1937 9a4f63d1 Iustin Pop
          logging.error("Copy of file %s to node %s failed", fname, to_node)
1938 a8083063 Iustin Pop
1939 d6a02168 Michael Hanselmann
    to_copy = []
1940 00cd937c Iustin Pop
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
1941 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
1942 a8083063 Iustin Pop
    for fname in to_copy:
1943 72737a7f Iustin Pop
      result = self.rpc.call_upload_file([node], fname)
1944 b5602d15 Guido Trotter
      if not result[node]:
1945 9a4f63d1 Iustin Pop
        logging.error("Could not copy file %s to node %s", fname, node)
1946 a8083063 Iustin Pop
1947 d8470559 Michael Hanselmann
    if self.op.readd:
1948 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
1949 d8470559 Michael Hanselmann
    else:
1950 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
1951 a8083063 Iustin Pop
1952 a8083063 Iustin Pop
1953 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1954 a8083063 Iustin Pop
  """Query cluster configuration.
1955 a8083063 Iustin Pop

1956 a8083063 Iustin Pop
  """
1957 a8083063 Iustin Pop
  _OP_REQP = []
1958 642339cf Guido Trotter
  REQ_BGL = False
1959 642339cf Guido Trotter
1960 642339cf Guido Trotter
  def ExpandNames(self):
1961 642339cf Guido Trotter
    self.needed_locks = {}
1962 a8083063 Iustin Pop
1963 a8083063 Iustin Pop
  def CheckPrereq(self):
1964 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1965 a8083063 Iustin Pop

1966 a8083063 Iustin Pop
    """
1967 a8083063 Iustin Pop
    pass
1968 a8083063 Iustin Pop
1969 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1970 a8083063 Iustin Pop
    """Return cluster config.
1971 a8083063 Iustin Pop

1972 a8083063 Iustin Pop
    """
1973 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1974 a8083063 Iustin Pop
    result = {
1975 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1976 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1977 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1978 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1979 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1980 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1981 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
1982 469f88e1 Iustin Pop
      "master": cluster.master_node,
1983 02691904 Alexander Schreiber
      "default_hypervisor": cluster.default_hypervisor,
1984 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
1985 469f88e1 Iustin Pop
      "hvparams": cluster.hvparams,
1986 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
1987 a8083063 Iustin Pop
      }
1988 a8083063 Iustin Pop
1989 a8083063 Iustin Pop
    return result
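# Illustrative shape of the dict returned above (values are examples only and
# come from constants and the cluster configuration at runtime):
#   {"software_version": constants.RELEASE_VERSION,
#    "name": "cluster.example.com", "master": "node1.example.com",
#    "enabled_hypervisors": [...], "hvparams": {...}, "beparams": {...}, ...}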
1990 a8083063 Iustin Pop
1991 a8083063 Iustin Pop
1992 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
1993 ae5849b5 Michael Hanselmann
  """Return configuration values.
1994 a8083063 Iustin Pop

1995 a8083063 Iustin Pop
  """
1996 a8083063 Iustin Pop
  _OP_REQP = []
1997 642339cf Guido Trotter
  REQ_BGL = False
1998 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
1999 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")
2000 642339cf Guido Trotter
2001 642339cf Guido Trotter
  def ExpandNames(self):
2002 642339cf Guido Trotter
    self.needed_locks = {}
2003 a8083063 Iustin Pop
2004 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2005 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2006 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
2007 ae5849b5 Michael Hanselmann
2008 a8083063 Iustin Pop
  def CheckPrereq(self):
2009 a8083063 Iustin Pop
    """No prerequisites.
2010 a8083063 Iustin Pop

2011 a8083063 Iustin Pop
    """
2012 a8083063 Iustin Pop
    pass
2013 a8083063 Iustin Pop
2014 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2015 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
2016 a8083063 Iustin Pop

2017 a8083063 Iustin Pop
    """
2018 ae5849b5 Michael Hanselmann
    values = []
2019 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
2020 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
2021 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
2022 ae5849b5 Michael Hanselmann
      elif field == "master_node":
2023 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
2024 3ccafd0e Iustin Pop
      elif field == "drain_flag":
2025 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2026 ae5849b5 Michael Hanselmann
      else:
2027 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
2028 3ccafd0e Iustin Pop
      values.append(entry)
2029 ae5849b5 Michael Hanselmann
    return values
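# Illustrative use of LUQueryConfigValues: requesting
# output_fields=["cluster_name", "drain_flag"] would return something like
# ["cluster.example.com", False], in the same order as the requested fields.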
2030 a8083063 Iustin Pop
2031 a8083063 Iustin Pop
2032 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
2033 a8083063 Iustin Pop
  """Bring up an instance's disks.
2034 a8083063 Iustin Pop

2035 a8083063 Iustin Pop
  """
2036 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2037 f22a8ba3 Guido Trotter
  REQ_BGL = False
2038 f22a8ba3 Guido Trotter
2039 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2040 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2041 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2042 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2043 f22a8ba3 Guido Trotter
2044 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2045 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2046 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2047 a8083063 Iustin Pop
2048 a8083063 Iustin Pop
  def CheckPrereq(self):
2049 a8083063 Iustin Pop
    """Check prerequisites.
2050 a8083063 Iustin Pop

2051 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2052 a8083063 Iustin Pop

2053 a8083063 Iustin Pop
    """
2054 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2055 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2056 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2057 a8083063 Iustin Pop
2058 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2059 a8083063 Iustin Pop
    """Activate the disks.
2060 a8083063 Iustin Pop

2061 a8083063 Iustin Pop
    """
2062 b9bddb6b Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2063 a8083063 Iustin Pop
    if not disks_ok:
2064 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
2065 a8083063 Iustin Pop
2066 a8083063 Iustin Pop
    return disks_info
2067 a8083063 Iustin Pop
2068 a8083063 Iustin Pop
2069 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2070 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
2071 a8083063 Iustin Pop

2072 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
2073 a8083063 Iustin Pop

2074 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
2075 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
2076 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
2077 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
2078 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
2079 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
2080 e4376078 Iustin Pop
      won't result in an error return from the function
2081 e4376078 Iustin Pop
  @return: False if the operation failed, otherwise a list of
2082 e4376078 Iustin Pop
      (host, instance_visible_name, node_visible_name)
2083 e4376078 Iustin Pop
      with the mapping from node devices to instance devices
2084 a8083063 Iustin Pop

2085 a8083063 Iustin Pop
  """
2086 a8083063 Iustin Pop
  device_info = []
2087 a8083063 Iustin Pop
  disks_ok = True
2088 fdbd668d Iustin Pop
  iname = instance.name
2089 fdbd668d Iustin Pop
  # With the two-pass mechanism we try to reduce the window of
2090 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
2091 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
2092 fdbd668d Iustin Pop
2093 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
2094 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
2095 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
2096 fdbd668d Iustin Pop
  # SyncSource, etc.)
2097 fdbd668d Iustin Pop
2098 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
2099 a8083063 Iustin Pop
  for inst_disk in instance.disks:
2100 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2101 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2102 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2103 a8083063 Iustin Pop
      if not result:
2104 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2105 86d9d3bb Iustin Pop
                           " (is_primary=False, pass=1)",
2106 86d9d3bb Iustin Pop
                           inst_disk.iv_name, node)
2107 fdbd668d Iustin Pop
        if not ignore_secondaries:
2108 a8083063 Iustin Pop
          disks_ok = False
2109 fdbd668d Iustin Pop
2110 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
2111 fdbd668d Iustin Pop
2112 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
2113 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
2114 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2115 fdbd668d Iustin Pop
      if node != instance.primary_node:
2116 fdbd668d Iustin Pop
        continue
2117 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2118 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2119 fdbd668d Iustin Pop
      if not result:
2120 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2121 86d9d3bb Iustin Pop
                           " (is_primary=True, pass=2)",
2122 86d9d3bb Iustin Pop
                           inst_disk.iv_name, node)
2123 fdbd668d Iustin Pop
        disks_ok = False
2124 fdbd668d Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
2125 a8083063 Iustin Pop
2126 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
2127 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
2128 b352ab5b Iustin Pop
  # improving the logical/physical id handling
2129 b352ab5b Iustin Pop
  for disk in instance.disks:
2130 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
2131 b352ab5b Iustin Pop
2132 a8083063 Iustin Pop
  return disks_ok, device_info
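# Illustrative return value of _AssembleInstanceDisks for a healthy
# single-disk instance (the third element is whatever call_blockdev_assemble
# returned on the primary node, typically the node-visible device path):
#   (True, [("node1.example.com", "sda", "/dev/drbd0")])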
2133 a8083063 Iustin Pop
2134 a8083063 Iustin Pop
2135 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
2136 3ecf6786 Iustin Pop
  """Start the disks of an instance.
2137 3ecf6786 Iustin Pop

2138 3ecf6786 Iustin Pop
  """
2139 b9bddb6b Iustin Pop
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
2140 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
2141 fe7b0351 Michael Hanselmann
  if not disks_ok:
2142 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
2143 fe7b0351 Michael Hanselmann
    if force is not None and not force:
2144 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
2145 86d9d3bb Iustin Pop
                         " secondary node,"
2146 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
2147 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
2148 fe7b0351 Michael Hanselmann
2149 fe7b0351 Michael Hanselmann
2150 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
2151 a8083063 Iustin Pop
  """Shutdown an instance's disks.
2152 a8083063 Iustin Pop

2153 a8083063 Iustin Pop
  """
2154 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2155 f22a8ba3 Guido Trotter
  REQ_BGL = False
2156 f22a8ba3 Guido Trotter
2157 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2158 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2159 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2160 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2161 f22a8ba3 Guido Trotter
2162 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2163 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2164 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2165 a8083063 Iustin Pop
2166 a8083063 Iustin Pop
  def CheckPrereq(self):
2167 a8083063 Iustin Pop
    """Check prerequisites.
2168 a8083063 Iustin Pop

2169 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2170 a8083063 Iustin Pop

2171 a8083063 Iustin Pop
    """
2172 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2173 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2174 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2175 a8083063 Iustin Pop
2176 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2177 a8083063 Iustin Pop
    """Deactivate the disks
2178 a8083063 Iustin Pop

2179 a8083063 Iustin Pop
    """
2180 a8083063 Iustin Pop
    instance = self.instance
2181 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
2182 a8083063 Iustin Pop
2183 a8083063 Iustin Pop
2184 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
2185 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
2186 155d6c75 Guido Trotter

2187 155d6c75 Guido Trotter
  This function checks if an instance is running, before calling
2188 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
2189 155d6c75 Guido Trotter

2190 155d6c75 Guido Trotter
  """
2191 72737a7f Iustin Pop
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
2192 72737a7f Iustin Pop
                                      [instance.hypervisor])
2193 155d6c75 Guido Trotter
  ins_l = ins_l[instance.primary_node]
2194 155d6c75 Guido Trotter
  if not isinstance(ins_l, list):
2195 155d6c75 Guido Trotter
    raise errors.OpExecError("Can't contact node '%s'" %
2196 155d6c75 Guido Trotter
                             instance.primary_node)
2197 155d6c75 Guido Trotter
2198 155d6c75 Guido Trotter
  if instance.name in ins_l:
2199 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
2200 155d6c75 Guido Trotter
                             " block devices.")
2201 155d6c75 Guido Trotter
2202 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
2203 a8083063 Iustin Pop
2204 a8083063 Iustin Pop
2205 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2206 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2207 a8083063 Iustin Pop

2208 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2209 a8083063 Iustin Pop

2210 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
2211 a8083063 Iustin Pop
  ignored.
2212 a8083063 Iustin Pop

2213 a8083063 Iustin Pop
  """
2214 a8083063 Iustin Pop
  result = True
2215 a8083063 Iustin Pop
  for disk in instance.disks:
2216 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2217 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
2218 72737a7f Iustin Pop
      if not lu.rpc.call_blockdev_shutdown(node, top_disk):
2219 9a4f63d1 Iustin Pop
        logging.error("Could not shutdown block device %s on node %s",
2220 9a4f63d1 Iustin Pop
                      disk.iv_name, node)
2221 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2222 a8083063 Iustin Pop
          result = False
2223 a8083063 Iustin Pop
  return result
2224 a8083063 Iustin Pop
2225 a8083063 Iustin Pop
2226 b9bddb6b Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
2227 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2228 d4f16fd9 Iustin Pop

2229 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
2230 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2231 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
2232 d4f16fd9 Iustin Pop
  exception.
2233 d4f16fd9 Iustin Pop

2234 b9bddb6b Iustin Pop
  @type lu: L{LogicalUnit}
2235 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2236 e69d05fd Iustin Pop
  @type node: C{str}
2237 e69d05fd Iustin Pop
  @param node: the node to check
2238 e69d05fd Iustin Pop
  @type reason: C{str}
2239 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2240 e69d05fd Iustin Pop
  @type requested: C{int}
2241 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2242 e69d05fd Iustin Pop
  @type hypervisor: C{str}
2243 e69d05fd Iustin Pop
  @param hypervisor: the hypervisor to ask for memory stats
2244 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2245 e69d05fd Iustin Pop
      we cannot check the node
2246 d4f16fd9 Iustin Pop

2247 d4f16fd9 Iustin Pop
  """
2248 72737a7f Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
2249 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2250 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2251 d4f16fd9 Iustin Pop
                             " information" % (node,))
2252 d4f16fd9 Iustin Pop
2253 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2254 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2255 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2256 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2257 d4f16fd9 Iustin Pop
  if requested > free_mem:
2258 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2259 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2260 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2261 d4f16fd9 Iustin Pop
2262 d4f16fd9 Iustin Pop
2263 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2264 a8083063 Iustin Pop
  """Starts an instance.
2265 a8083063 Iustin Pop

2266 a8083063 Iustin Pop
  """
2267 a8083063 Iustin Pop
  HPATH = "instance-start"
2268 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2269 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2270 e873317a Guido Trotter
  REQ_BGL = False
2271 e873317a Guido Trotter
2272 e873317a Guido Trotter
  def ExpandNames(self):
2273 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2274 a8083063 Iustin Pop
2275 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2276 a8083063 Iustin Pop
    """Build hooks env.
2277 a8083063 Iustin Pop

2278 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2279 a8083063 Iustin Pop

2280 a8083063 Iustin Pop
    """
2281 a8083063 Iustin Pop
    env = {
2282 a8083063 Iustin Pop
      "FORCE": self.op.force,
2283 a8083063 Iustin Pop
      }
2284 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2285 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2286 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2287 a8083063 Iustin Pop
    return env, nl, nl
2288 a8083063 Iustin Pop
2289 a8083063 Iustin Pop
  def CheckPrereq(self):
2290 a8083063 Iustin Pop
    """Check prerequisites.
2291 a8083063 Iustin Pop

2292 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2293 a8083063 Iustin Pop

2294 a8083063 Iustin Pop
    """
2295 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2296 e873317a Guido Trotter
    assert self.instance is not None, \
2297 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2298 a8083063 Iustin Pop
2299 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
2300 a8083063 Iustin Pop
    # check bridges existance
2301 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
2302 a8083063 Iustin Pop
2303 b9bddb6b Iustin Pop
    _CheckNodeFreeMemory(self, instance.primary_node,
2304 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
2305 338e51e8 Iustin Pop
                         bep[constants.BE_MEMORY], instance.hypervisor)
2306 d4f16fd9 Iustin Pop
2307 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2308 a8083063 Iustin Pop
    """Start the instance.
2309 a8083063 Iustin Pop

2310 a8083063 Iustin Pop
    """
2311 a8083063 Iustin Pop
    instance = self.instance
2312 a8083063 Iustin Pop
    force = self.op.force
2313 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
2314 a8083063 Iustin Pop
2315 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
2316 fe482621 Iustin Pop
2317 a8083063 Iustin Pop
    node_current = instance.primary_node
2318 a8083063 Iustin Pop
2319 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
2320 a8083063 Iustin Pop
2321 72737a7f Iustin Pop
    if not self.rpc.call_instance_start(node_current, instance, extra_args):
2322 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
2323 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2324 a8083063 Iustin Pop
2325 a8083063 Iustin Pop
2326 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2327 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2328 bf6929a2 Alexander Schreiber

2329 bf6929a2 Alexander Schreiber
  """
2330 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2331 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2332 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2333 e873317a Guido Trotter
  REQ_BGL = False
2334 e873317a Guido Trotter
2335 e873317a Guido Trotter
  def ExpandNames(self):
2336 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2337 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2338 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
2339 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2340 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
2341 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2342 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
2343 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2344 bf6929a2 Alexander Schreiber
2345 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2346 bf6929a2 Alexander Schreiber
    """Build hooks env.
2347 bf6929a2 Alexander Schreiber

2348 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2349 bf6929a2 Alexander Schreiber

2350 bf6929a2 Alexander Schreiber
    """
2351 bf6929a2 Alexander Schreiber
    env = {
2352 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2353 bf6929a2 Alexander Schreiber
      }
2354 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2355 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2356 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2357 bf6929a2 Alexander Schreiber
    return env, nl, nl
2358 bf6929a2 Alexander Schreiber
2359 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2360 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2361 bf6929a2 Alexander Schreiber

2362 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2363 bf6929a2 Alexander Schreiber

2364 bf6929a2 Alexander Schreiber
    """
2365 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2366 e873317a Guido Trotter
    assert self.instance is not None, \
2367 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2368 bf6929a2 Alexander Schreiber
2369 bf6929a2 Alexander Schreiber
    # check bridges existance
2370 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
2371 bf6929a2 Alexander Schreiber
2372 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2373 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2374 bf6929a2 Alexander Schreiber

2375 bf6929a2 Alexander Schreiber
    """
2376 bf6929a2 Alexander Schreiber
    instance = self.instance
2377 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2378 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2379 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2380 bf6929a2 Alexander Schreiber
2381 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
2382 bf6929a2 Alexander Schreiber
2383 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2384 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2385 72737a7f Iustin Pop
      if not self.rpc.call_instance_reboot(node_current, instance,
2386 72737a7f Iustin Pop
                                           reboot_type, extra_args):
2387 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2388 bf6929a2 Alexander Schreiber
    else:
2389 72737a7f Iustin Pop
      if not self.rpc.call_instance_shutdown(node_current, instance):
2390 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2391 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
2392 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
2393 72737a7f Iustin Pop
      if not self.rpc.call_instance_start(node_current, instance, extra_args):
2394 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
2395 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2396 bf6929a2 Alexander Schreiber
2397 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
2398 bf6929a2 Alexander Schreiber
2399 bf6929a2 Alexander Schreiber
2400 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2401 a8083063 Iustin Pop
  """Shutdown an instance.
2402 a8083063 Iustin Pop

2403 a8083063 Iustin Pop
  """
2404 a8083063 Iustin Pop
  HPATH = "instance-stop"
2405 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2406 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2407 e873317a Guido Trotter
  REQ_BGL = False
2408 e873317a Guido Trotter
2409 e873317a Guido Trotter
  def ExpandNames(self):
2410 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2411 a8083063 Iustin Pop
2412 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2413 a8083063 Iustin Pop
    """Build hooks env.
2414 a8083063 Iustin Pop

2415 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2416 a8083063 Iustin Pop

2417 a8083063 Iustin Pop
    """
2418 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2419 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2420 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2421 a8083063 Iustin Pop
    return env, nl, nl
2422 a8083063 Iustin Pop
2423 a8083063 Iustin Pop
  def CheckPrereq(self):
2424 a8083063 Iustin Pop
    """Check prerequisites.
2425 a8083063 Iustin Pop

2426 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2427 a8083063 Iustin Pop

2428 a8083063 Iustin Pop
    """
2429 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2430 e873317a Guido Trotter
    assert self.instance is not None, \
2431 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2432 a8083063 Iustin Pop
2433 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2434 a8083063 Iustin Pop
    """Shutdown the instance.
2435 a8083063 Iustin Pop

2436 a8083063 Iustin Pop
    """
2437 a8083063 Iustin Pop
    instance = self.instance
2438 a8083063 Iustin Pop
    node_current = instance.primary_node
2439 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2440 72737a7f Iustin Pop
    if not self.rpc.call_instance_shutdown(node_current, instance):
2441 86d9d3bb Iustin Pop
      self.proc.LogWarning("Could not shutdown instance")
2442 a8083063 Iustin Pop
2443 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
2444 a8083063 Iustin Pop
2445 a8083063 Iustin Pop
2446 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2447 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2448 fe7b0351 Michael Hanselmann

2449 fe7b0351 Michael Hanselmann
  """
2450 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2451 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2452 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2453 4e0b4d2d Guido Trotter
  REQ_BGL = False
2454 4e0b4d2d Guido Trotter
2455 4e0b4d2d Guido Trotter
  def ExpandNames(self):
2456 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
2457 fe7b0351 Michael Hanselmann
2458 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2459 fe7b0351 Michael Hanselmann
    """Build hooks env.
2460 fe7b0351 Michael Hanselmann

2461 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2462 fe7b0351 Michael Hanselmann

2463 fe7b0351 Michael Hanselmann
    """
2464 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2465 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2466 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2467 fe7b0351 Michael Hanselmann
    return env, nl, nl
2468 fe7b0351 Michael Hanselmann
2469 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2470 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2471 fe7b0351 Michael Hanselmann

2472 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2473 fe7b0351 Michael Hanselmann

2474 fe7b0351 Michael Hanselmann
    """
2475 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2476 4e0b4d2d Guido Trotter
    assert instance is not None, \
2477 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2478 4e0b4d2d Guido Trotter
2479 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2480 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2481 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2482 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2483 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2484 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2485 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2486 72737a7f Iustin Pop
                                              instance.name,
2487 72737a7f Iustin Pop
                                              instance.hypervisor)
2488 fe7b0351 Michael Hanselmann
    if remote_info:
2489 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2490 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2491 3ecf6786 Iustin Pop
                                  instance.primary_node))
2492 d0834de3 Michael Hanselmann
2493 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2494 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2495 d0834de3 Michael Hanselmann
      # OS verification
2496 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2497 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2498 d0834de3 Michael Hanselmann
      if pnode is None:
2499 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2500 3ecf6786 Iustin Pop
                                   self.op.pnode)
2501 72737a7f Iustin Pop
      os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
2502 dfa96ded Guido Trotter
      if not os_obj:
2503 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2504 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2505 d0834de3 Michael Hanselmann
2506 fe7b0351 Michael Hanselmann
    self.instance = instance
2507 fe7b0351 Michael Hanselmann
2508 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2509 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2510 fe7b0351 Michael Hanselmann

2511 fe7b0351 Michael Hanselmann
    """
2512 fe7b0351 Michael Hanselmann
    inst = self.instance
2513 fe7b0351 Michael Hanselmann
2514 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2515 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2516 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2517 97abc79f Iustin Pop
      self.cfg.Update(inst)
2518 d0834de3 Michael Hanselmann
2519 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
2520 fe7b0351 Michael Hanselmann
    try:
2521 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2522 bb2ee932 Iustin Pop
      if not self.rpc.call_instance_os_add(inst.primary_node, inst):
2523 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Could not install OS for instance %s"
2524 f4bc1f2c Michael Hanselmann
                                 " on node %s" %
2525 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2526 fe7b0351 Michael Hanselmann
    finally:
2527 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
2528 fe7b0351 Michael Hanselmann
2529 fe7b0351 Michael Hanselmann
2530 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2531 decd5f45 Iustin Pop
  """Rename an instance.
2532 decd5f45 Iustin Pop

2533 decd5f45 Iustin Pop
  """
2534 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2535 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2536 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2537 decd5f45 Iustin Pop
2538 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2539 decd5f45 Iustin Pop
    """Build hooks env.
2540 decd5f45 Iustin Pop

2541 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2542 decd5f45 Iustin Pop

2543 decd5f45 Iustin Pop
    """
2544 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2545 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2546 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2547 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2548 decd5f45 Iustin Pop
    return env, nl, nl
2549 decd5f45 Iustin Pop
2550 decd5f45 Iustin Pop
  def CheckPrereq(self):
2551 decd5f45 Iustin Pop
    """Check prerequisites.
2552 decd5f45 Iustin Pop

2553 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2554 decd5f45 Iustin Pop

2555 decd5f45 Iustin Pop
    """
2556 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2557 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2558 decd5f45 Iustin Pop
    if instance is None:
2559 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2560 decd5f45 Iustin Pop
                                 self.op.instance_name)
2561 decd5f45 Iustin Pop
    if instance.status != "down":
2562 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2563 decd5f45 Iustin Pop
                                 self.op.instance_name)
2564 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2565 72737a7f Iustin Pop
                                              instance.name,
2566 72737a7f Iustin Pop
                                              instance.hypervisor)
2567 decd5f45 Iustin Pop
    if remote_info:
2568 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2569 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2570 decd5f45 Iustin Pop
                                  instance.primary_node))
2571 decd5f45 Iustin Pop
    self.instance = instance
2572 decd5f45 Iustin Pop
2573 decd5f45 Iustin Pop
    # new name verification
2574 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2575 decd5f45 Iustin Pop
2576 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2577 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
2578 7bde3275 Guido Trotter
    if new_name in instance_list:
2579 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2580 c09f363f Manuel Franceschini
                                 new_name)
2581 7bde3275 Guido Trotter
2582 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2583 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
2584 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2585 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2586 decd5f45 Iustin Pop
2587 decd5f45 Iustin Pop
2588 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2589 decd5f45 Iustin Pop
    """Reinstall the instance.
2590 decd5f45 Iustin Pop

2591 decd5f45 Iustin Pop
    """
2592 decd5f45 Iustin Pop
    inst = self.instance
2593 decd5f45 Iustin Pop
    old_name = inst.name
2594 decd5f45 Iustin Pop
2595 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2596 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2597 b23c4333 Manuel Franceschini
2598 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2599 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
2600 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
2601 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
2602 decd5f45 Iustin Pop
2603 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2604 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2605 decd5f45 Iustin Pop
2606 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2607 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2608 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
2609 72737a7f Iustin Pop
                                                     old_file_storage_dir,
2610 72737a7f Iustin Pop
                                                     new_file_storage_dir)
2611 b23c4333 Manuel Franceschini
2612 b23c4333 Manuel Franceschini
      if not result:
2613 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2614 b23c4333 Manuel Franceschini
                                 " directory '%s' to '%s' (but the instance"
2615 b23c4333 Manuel Franceschini
                                 " has been renamed in Ganeti)" % (
2616 b23c4333 Manuel Franceschini
                                 inst.primary_node, old_file_storage_dir,
2617 b23c4333 Manuel Franceschini
                                 new_file_storage_dir))
2618 b23c4333 Manuel Franceschini
2619 b23c4333 Manuel Franceschini
      if not result[0]:
2620 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2621 b23c4333 Manuel Franceschini
                                 " (but the instance has been renamed in"
2622 b23c4333 Manuel Franceschini
                                 " Ganeti)" % (old_file_storage_dir,
2623 b23c4333 Manuel Franceschini
                                               new_file_storage_dir))
2624 b23c4333 Manuel Franceschini
2625 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
2626 decd5f45 Iustin Pop
    try:
2627 72737a7f Iustin Pop
      if not self.rpc.call_instance_run_rename(inst.primary_node, inst,
2628 d15a9ad3 Guido Trotter
                                               old_name):
2629 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
2630 6291574d Alexander Schreiber
               " (but the instance has been renamed in Ganeti)" %
2631 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2632 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
2633 decd5f45 Iustin Pop
    finally:
2634 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
2635 decd5f45 Iustin Pop
2636 decd5f45 Iustin Pop
2637 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2638 a8083063 Iustin Pop
  """Remove an instance.
2639 a8083063 Iustin Pop

2640 a8083063 Iustin Pop
  """
2641 a8083063 Iustin Pop
  HPATH = "instance-remove"
2642 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2643 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
2644 cf472233 Guido Trotter
  REQ_BGL = False
2645 cf472233 Guido Trotter
2646 cf472233 Guido Trotter
  def ExpandNames(self):
2647 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
2648 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2649 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2650 cf472233 Guido Trotter
2651 cf472233 Guido Trotter
  def DeclareLocks(self, level):
2652 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
2653 cf472233 Guido Trotter
      self._LockInstancesNodes()
2654 a8083063 Iustin Pop
2655 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2656 a8083063 Iustin Pop
    """Build hooks env.
2657 a8083063 Iustin Pop

2658 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2659 a8083063 Iustin Pop

2660 a8083063 Iustin Pop
    """
2661 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2662 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
2663 a8083063 Iustin Pop
    return env, nl, nl
2664 a8083063 Iustin Pop
2665 a8083063 Iustin Pop
  def CheckPrereq(self):
2666 a8083063 Iustin Pop
    """Check prerequisites.
2667 a8083063 Iustin Pop

2668 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2669 a8083063 Iustin Pop

2670 a8083063 Iustin Pop
    """
2671 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2672 cf472233 Guido Trotter
    assert self.instance is not None, \
2673 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2674 a8083063 Iustin Pop
2675 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2676 a8083063 Iustin Pop
    """Remove the instance.
2677 a8083063 Iustin Pop

2678 a8083063 Iustin Pop
    """
2679 a8083063 Iustin Pop
    instance = self.instance
2680 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
2681 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
2682 a8083063 Iustin Pop
2683 72737a7f Iustin Pop
    if not self.rpc.call_instance_shutdown(instance.primary_node, instance):
2684 1d67656e Iustin Pop
      if self.op.ignore_failures:
2685 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2686 1d67656e Iustin Pop
      else:
2687 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2688 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2689 a8083063 Iustin Pop
2690 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
2691 a8083063 Iustin Pop
2692 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
2693 1d67656e Iustin Pop
      if self.op.ignore_failures:
2694 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2695 1d67656e Iustin Pop
      else:
2696 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2697 a8083063 Iustin Pop
2698 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
2699 a8083063 Iustin Pop
2700 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2701 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2702 a8083063 Iustin Pop
2703 a8083063 Iustin Pop
2704 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2705 a8083063 Iustin Pop
  """Logical unit for querying instances.
2706 a8083063 Iustin Pop

2707 a8083063 Iustin Pop
  """
2708 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2709 7eb9d8f7 Guido Trotter
  REQ_BGL = False
2710 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
2711 a2d2e1a7 Iustin Pop
                                    "admin_state", "admin_ram",
2712 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
2713 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
2714 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
2715 a2d2e1a7 Iustin Pop
                                    "(disk).(size)/([0-9]+)",
2716 a2d2e1a7 Iustin Pop
                                    "(disk).(sizes)",
2717 a2d2e1a7 Iustin Pop
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
2718 a2d2e1a7 Iustin Pop
                                    "(nic).(macs|ips|bridges)",
2719 a2d2e1a7 Iustin Pop
                                    "(disk|nic).(count)",
2720 a2d2e1a7 Iustin Pop
                                    "serial_no", "hypervisor", "hvparams",] +
2721 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
2722 a2d2e1a7 Iustin Pop
                                   for name in constants.HVS_PARAMETERS] +
2723 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
2724 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
2725 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
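  # The parenthesised entries above are regex-like patterns: they accept
  # per-index fields such as "disk.size/0" or "nic.mac/1" as well as the
  # aggregate forms "disk.sizes", "nic.macs" and "nic.count"; Exec dispatches
  # on the matched groups.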
2726 31bf511f Iustin Pop
2727 a8083063 Iustin Pop
2728 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
2729 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2730 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2731 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2732 a8083063 Iustin Pop
2733 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
2734 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
2735 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2736 7eb9d8f7 Guido Trotter
2737 57a2fb91 Iustin Pop
    if self.op.names:
2738 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
2739 7eb9d8f7 Guido Trotter
    else:
2740 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
2741 7eb9d8f7 Guido Trotter
2742 31bf511f Iustin Pop
    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2743 57a2fb91 Iustin Pop
    if self.do_locking:
2744 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
2745 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
2746 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
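    # Locking (and therefore contacting nodes for live data) is only needed
    # when at least one requested field is not in the static set; queries for
    # purely static fields are answered from the configuration alone.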
2747 7eb9d8f7 Guido Trotter
2748 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
2749 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
2750 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
2751 7eb9d8f7 Guido Trotter
2752 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
2753 7eb9d8f7 Guido Trotter
    """Check prerequisites.
2754 7eb9d8f7 Guido Trotter

2755 7eb9d8f7 Guido Trotter
    """
2756 57a2fb91 Iustin Pop
    pass
2757 069dcc86 Iustin Pop
2758 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2759 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2760 a8083063 Iustin Pop

2761 a8083063 Iustin Pop
    """
2762 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
2763 57a2fb91 Iustin Pop
    if self.do_locking:
2764 57a2fb91 Iustin Pop
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2765 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2766 3fa93523 Guido Trotter
      instance_names = self.wanted
2767 3fa93523 Guido Trotter
      missing = set(instance_names).difference(all_info.keys())
2768 3fa93523 Guido Trotter
      if missing:
2769 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2770 3fa93523 Guido Trotter
          "Some instances were removed before retrieving their data: %s"
2771 3fa93523 Guido Trotter
          % missing)
2772 57a2fb91 Iustin Pop
    else:
2773 57a2fb91 Iustin Pop
      instance_names = all_info.keys()
2774 c1f1cbb2 Iustin Pop
2775 c1f1cbb2 Iustin Pop
    instance_names = utils.NiceSort(instance_names)
2776 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
2777 a8083063 Iustin Pop
2778 a8083063 Iustin Pop
    # begin data gathering
2779 a8083063 Iustin Pop
2780 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2781 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
2782 a8083063 Iustin Pop
2783 a8083063 Iustin Pop
    bad_nodes = []
2784 31bf511f Iustin Pop
    if self.do_locking:
2785 a8083063 Iustin Pop
      live_data = {}
2786 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
2787 a8083063 Iustin Pop
      for name in nodes:
2788 a8083063 Iustin Pop
        result = node_data[name]
2789 a8083063 Iustin Pop
        if result:
2790 a8083063 Iustin Pop
          live_data.update(result)
2791 a8083063 Iustin Pop
        elif result == False:
2792 a8083063 Iustin Pop
          bad_nodes.append(name)
2793 a8083063 Iustin Pop
        # else no instance is alive
2794 a8083063 Iustin Pop
    else:
2795 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2796 a8083063 Iustin Pop
2797 a8083063 Iustin Pop
    # end data gathering
2798 a8083063 Iustin Pop
2799 5018a335 Iustin Pop
    HVPREFIX = "hv/"
2800 338e51e8 Iustin Pop
    BEPREFIX = "be/"
2801 a8083063 Iustin Pop
    output = []
2802 a8083063 Iustin Pop
    for instance in instance_list:
2803 a8083063 Iustin Pop
      iout = []
2804 5018a335 Iustin Pop
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
2805 338e51e8 Iustin Pop
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
2806 a8083063 Iustin Pop
      for field in self.op.output_fields:
2807 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
2808 a8083063 Iustin Pop
        if field == "name":
2809 a8083063 Iustin Pop
          val = instance.name
2810 a8083063 Iustin Pop
        elif field == "os":
2811 a8083063 Iustin Pop
          val = instance.os
2812 a8083063 Iustin Pop
        elif field == "pnode":
2813 a8083063 Iustin Pop
          val = instance.primary_node
2814 a8083063 Iustin Pop
        elif field == "snodes":
2815 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2816 a8083063 Iustin Pop
        elif field == "admin_state":
2817 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2818 a8083063 Iustin Pop
        elif field == "oper_state":
2819 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2820 8a23d2d3 Iustin Pop
            val = None
2821 a8083063 Iustin Pop
          else:
2822 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2823 d8052456 Iustin Pop
        elif field == "status":
2824 d8052456 Iustin Pop
          if instance.primary_node in bad_nodes:
2825 d8052456 Iustin Pop
            val = "ERROR_nodedown"
2826 d8052456 Iustin Pop
          else:
2827 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
2828 d8052456 Iustin Pop
            if running:
2829 d8052456 Iustin Pop
              if instance.status != "down":
2830 d8052456 Iustin Pop
                val = "running"
2831 d8052456 Iustin Pop
              else:
2832 d8052456 Iustin Pop
                val = "ERROR_up"
2833 d8052456 Iustin Pop
            else:
2834 d8052456 Iustin Pop
              if instance.status != "down":
2835 d8052456 Iustin Pop
                val = "ERROR_down"
2836 d8052456 Iustin Pop
              else:
2837 d8052456 Iustin Pop
                val = "ADMIN_down"
2838 a8083063 Iustin Pop
        elif field == "oper_ram":
2839 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2840 8a23d2d3 Iustin Pop
            val = None
2841 a8083063 Iustin Pop
          elif instance.name in live_data:
2842 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2843 a8083063 Iustin Pop
          else:
2844 a8083063 Iustin Pop
            val = "-"
2845 a8083063 Iustin Pop
        elif field == "disk_template":
2846 a8083063 Iustin Pop
          val = instance.disk_template
2847 a8083063 Iustin Pop
        elif field == "ip":
2848 a8083063 Iustin Pop
          val = instance.nics[0].ip
2849 a8083063 Iustin Pop
        elif field == "bridge":
2850 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2851 a8083063 Iustin Pop
        elif field == "mac":
2852 a8083063 Iustin Pop
          val = instance.nics[0].mac
2853 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2854 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
2855 ad24e046 Iustin Pop
          try:
2856 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
2857 ad24e046 Iustin Pop
          except errors.OpPrereqError:
2858 8a23d2d3 Iustin Pop
            val = None
2859 130a6a6f Iustin Pop
        elif field == "tags":
2860 130a6a6f Iustin Pop
          val = list(instance.GetTags())
2861 38d7239a Iustin Pop
        elif field == "serial_no":
2862 38d7239a Iustin Pop
          val = instance.serial_no
2863 5018a335 Iustin Pop
        elif field == "network_port":
2864 5018a335 Iustin Pop
          val = instance.network_port
2865 338e51e8 Iustin Pop
        elif field == "hypervisor":
2866 338e51e8 Iustin Pop
          val = instance.hypervisor
2867 338e51e8 Iustin Pop
        elif field == "hvparams":
2868 338e51e8 Iustin Pop
          val = i_hv
2869 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
2870 5018a335 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
2871 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
2872 338e51e8 Iustin Pop
        elif field == "beparams":
2873 338e51e8 Iustin Pop
          val = i_be
2874 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
2875 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
2876 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
2877 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
2878 71c1af58 Iustin Pop
          # matches a variable list
2879 71c1af58 Iustin Pop
          st_groups = st_match.groups()
2880 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
2881 71c1af58 Iustin Pop
            if st_groups[1] == "count":
2882 71c1af58 Iustin Pop
              val = len(instance.disks)
2883 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
2884 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
2885 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
2886 3e0cea06 Iustin Pop
              try:
2887 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
2888 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
2889 71c1af58 Iustin Pop
                val = None
2890 71c1af58 Iustin Pop
            else:
2891 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
2892 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
2893 71c1af58 Iustin Pop
            if st_groups[1] == "count":
2894 71c1af58 Iustin Pop
              val = len(instance.nics)
2895 41a776da Iustin Pop
            elif st_groups[1] == "macs":
2896 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
2897 41a776da Iustin Pop
            elif st_groups[1] == "ips":
2898 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
2899 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
2900 41a776da Iustin Pop
              val = [nic.bridge for nic in instance.nics]
2901 71c1af58 Iustin Pop
            else:
2902 71c1af58 Iustin Pop
              # index-based item
2903 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
2904 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
2905 71c1af58 Iustin Pop
                val = None
2906 71c1af58 Iustin Pop
              else:
2907 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
2908 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
2909 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
2910 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
2911 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
2912 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].bridge
2913 71c1af58 Iustin Pop
                else:
2914 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
2915 71c1af58 Iustin Pop
          else:
2916 71c1af58 Iustin Pop
            assert False, "Unhandled variable parameter"
2917 a8083063 Iustin Pop
        else:
2918 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2919 a8083063 Iustin Pop
        iout.append(val)
2920 a8083063 Iustin Pop
      output.append(iout)
2921 a8083063 Iustin Pop
2922 a8083063 Iustin Pop
    return output
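
# Illustrative sketch (not part of the original cmdlib.py): the variable
# fields handled above ("disk/..." and "nic/..." style) are matched through
# _FIELDS_STATIC, which is defined earlier in this file. The regex below is
# only an approximation of that behaviour, shown to make the st_groups
# indices used above easier to follow; re is already imported at the top of
# the module and is repeated here so the sketch stands alone.
import re

_EXAMPLE_FIELD_RE = re.compile(r"^(disk|nic)\.(count|sizes|macs|ips|bridges|"
                               r"size|mac|ip|bridge)(?:/([0-9]+))?$")

# group 0 picks the dispatch branch, group 1 the sub-field, group 2 the
# optional index used further up:
assert _EXAMPLE_FIELD_RE.match("disk.size/0").groups() == ("disk", "size", "0")
assert _EXAMPLE_FIELD_RE.match("nic.count").groups() == ("nic", "count", None)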
2923 a8083063 Iustin Pop
2924 a8083063 Iustin Pop
2925 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2926 a8083063 Iustin Pop
  """Failover an instance.
2927 a8083063 Iustin Pop

2928 a8083063 Iustin Pop
  """
2929 a8083063 Iustin Pop
  HPATH = "instance-failover"
2930 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2931 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2932 c9e5c064 Guido Trotter
  REQ_BGL = False
2933 c9e5c064 Guido Trotter
2934 c9e5c064 Guido Trotter
  def ExpandNames(self):
2935 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
2936 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2937 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2938 c9e5c064 Guido Trotter
2939 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
2940 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
2941 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
2942 a8083063 Iustin Pop
2943 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2944 a8083063 Iustin Pop
    """Build hooks env.
2945 a8083063 Iustin Pop

2946 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2947 a8083063 Iustin Pop

2948 a8083063 Iustin Pop
    """
2949 a8083063 Iustin Pop
    env = {
2950 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2951 a8083063 Iustin Pop
      }
2952 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2953 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
2954 a8083063 Iustin Pop
    return env, nl, nl
2955 a8083063 Iustin Pop
2956 a8083063 Iustin Pop
  def CheckPrereq(self):
2957 a8083063 Iustin Pop
    """Check prerequisites.
2958 a8083063 Iustin Pop

2959 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2960 a8083063 Iustin Pop

2961 a8083063 Iustin Pop
    """
2962 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2963 c9e5c064 Guido Trotter
    assert self.instance is not None, \
2964 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2965 a8083063 Iustin Pop
2966 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
2967 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
2968 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2969 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
2970 2a710df1 Michael Hanselmann
2971 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2972 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2973 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2974 abdf0113 Iustin Pop
                                   "a mirrored disk template")
2975 2a710df1 Michael Hanselmann
2976 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2977 d4f16fd9 Iustin Pop
    # check memory requirements on the secondary node
2978 b9bddb6b Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
2979 338e51e8 Iustin Pop
                         instance.name, bep[constants.BE_MEMORY],
2980 e69d05fd Iustin Pop
                         instance.hypervisor)
2981 3a7c308e Guido Trotter
2982 a8083063 Iustin Pop
    # check bridge existance
2983 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2984 72737a7f Iustin Pop
    if not self.rpc.call_bridges_exist(target_node, brlist):
2985 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2986 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2987 50ff9a7a Iustin Pop
                                 (brlist, target_node))
2988 a8083063 Iustin Pop
2989 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2990 a8083063 Iustin Pop
    """Failover an instance.
2991 a8083063 Iustin Pop

2992 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2993 a8083063 Iustin Pop
    starting it on the secondary.
2994 a8083063 Iustin Pop

2995 a8083063 Iustin Pop
    """
2996 a8083063 Iustin Pop
    instance = self.instance
2997 a8083063 Iustin Pop
2998 a8083063 Iustin Pop
    source_node = instance.primary_node
2999 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
3000 a8083063 Iustin Pop
3001 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
3002 a8083063 Iustin Pop
    for dev in instance.disks:
3003 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
3004 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
3005 a0aaa0d0 Guido Trotter
        if instance.status == "up" and not self.op.ignore_consistency:
3006 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
3007 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
3008 a8083063 Iustin Pop
3009 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
3010 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
3011 9a4f63d1 Iustin Pop
                 instance.name, source_node)
3012 a8083063 Iustin Pop
3013 72737a7f Iustin Pop
    if not self.rpc.call_instance_shutdown(source_node, instance):
3014 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
3015 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
3016 86d9d3bb Iustin Pop
                             " Proceeding"
3017 86d9d3bb Iustin Pop
                             " anyway. Please make sure node %s is down",
3018 86d9d3bb Iustin Pop
                             instance.name, source_node, source_node)
3019 24a40d57 Iustin Pop
      else:
3020 24a40d57 Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
3021 24a40d57 Iustin Pop
                                 (instance.name, source_node))
3022 a8083063 Iustin Pop
3023 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
3024 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
3025 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
3026 a8083063 Iustin Pop
3027 a8083063 Iustin Pop
    instance.primary_node = target_node
3028 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
3029 b6102dab Guido Trotter
    self.cfg.Update(instance)
3030 a8083063 Iustin Pop
3031 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
3032 12a0cfbe Guido Trotter
    if instance.status == "up":
3033 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
3034 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
3035 9a4f63d1 Iustin Pop
                   instance.name, target_node)
3036 12a0cfbe Guido Trotter
3037 b9bddb6b Iustin Pop
      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
3038 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
3039 12a0cfbe Guido Trotter
      if not disks_ok:
3040 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3041 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
3042 a8083063 Iustin Pop
3043 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
3044 72737a7f Iustin Pop
      if not self.rpc.call_instance_start(target_node, instance, None):
3045 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3046 12a0cfbe Guido Trotter
        raise errors.OpExecError("Could not start instance %s on node %s." %
3047 12a0cfbe Guido Trotter
                                 (instance.name, target_node))
3048 a8083063 Iustin Pop
3049 a8083063 Iustin Pop
3050 b9bddb6b Iustin Pop
def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
3051 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
3052 a8083063 Iustin Pop

3053 a8083063 Iustin Pop
  This always creates all devices.
3054 a8083063 Iustin Pop

3055 a8083063 Iustin Pop
  """
3056 a8083063 Iustin Pop
  if device.children:
3057 a8083063 Iustin Pop
    for child in device.children:
3058 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
3059 a8083063 Iustin Pop
        return False
3060 a8083063 Iustin Pop
3061 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3062 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3063 72737a7f Iustin Pop
                                       instance.name, True, info)
3064 a8083063 Iustin Pop
  if not new_id:
3065 a8083063 Iustin Pop
    return False
3066 a8083063 Iustin Pop
  if device.physical_id is None:
3067 a8083063 Iustin Pop
    device.physical_id = new_id
3068 a8083063 Iustin Pop
  return True
3069 a8083063 Iustin Pop
3070 a8083063 Iustin Pop
3071 b9bddb6b Iustin Pop
def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
3072 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
3073 a8083063 Iustin Pop

3074 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
3075 a8083063 Iustin Pop
  all its children.
3076 a8083063 Iustin Pop

3077 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
3078 a8083063 Iustin Pop

3079 a8083063 Iustin Pop
  """
3080 a8083063 Iustin Pop
  if device.CreateOnSecondary():
3081 a8083063 Iustin Pop
    force = True
3082 a8083063 Iustin Pop
  if device.children:
3083 a8083063 Iustin Pop
    for child in device.children:
3084 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(lu, node, instance,
3085 3f78eef2 Iustin Pop
                                        child, force, info):
3086 a8083063 Iustin Pop
        return False
3087 a8083063 Iustin Pop
3088 a8083063 Iustin Pop
  if not force:
3089 a8083063 Iustin Pop
    return True
3090 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3091 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3092 72737a7f Iustin Pop
                                       instance.name, False, info)
3093 a8083063 Iustin Pop
  if not new_id:
3094 a8083063 Iustin Pop
    return False
3095 a8083063 Iustin Pop
  if device.physical_id is None:
3096 a8083063 Iustin Pop
    device.physical_id = new_id
3097 a8083063 Iustin Pop
  return True
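
# Illustrative sketch (not part of the original cmdlib.py): both helpers above
# walk the disk tree children-first, so lower-level devices exist before the
# device stacked on top of them. The tree below uses plain (name, children)
# tuples instead of objects.Disk to keep the sketch standalone.
def _example_creation_order(device, order=None):
  """Return the bottom-up creation order of a (name, children) tuple tree."""
  if order is None:
    order = []
  name, children = device
  for child in children:
    _example_creation_order(child, order)
  order.append(name)
  return order

# A DRBD8 device over a data LV and a meta LV gets its LVs created first:
_example_tree = ("drbd0", [("data_lv", []), ("meta_lv", [])])
assert (_example_creation_order(_example_tree) ==
        ["data_lv", "meta_lv", "drbd0"])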
3098 a8083063 Iustin Pop
3099 a8083063 Iustin Pop
3100 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
3101 923b1523 Iustin Pop
  """Generate a suitable LV name.
3102 923b1523 Iustin Pop

3103 923b1523 Iustin Pop
  This will generate one logical volume name for each given extension.
3104 923b1523 Iustin Pop

3105 923b1523 Iustin Pop
  """
3106 923b1523 Iustin Pop
  results = []
3107 923b1523 Iustin Pop
  for val in exts:
3108 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
3109 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
3110 923b1523 Iustin Pop
  return results
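
# Illustrative sketch (not part of the original cmdlib.py): a standalone
# stand-in for the helper above, using uuid4 where the real code asks the
# cluster configuration for a unique ID. Only the shape of the result
# matters: one "<unique-id><extension>" string per requested extension.
import uuid

def _example_generate_unique_names(exts):
  """Standalone stand-in using uuid4 instead of lu.cfg.GenerateUniqueID()."""
  return ["%s%s" % (uuid.uuid4(), ext) for ext in exts]

_example_names = _example_generate_unique_names([".disk0_data", ".disk0_meta"])
assert _example_names[0].endswith(".disk0_data")
assert _example_names[1].endswith(".disk0_meta")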
3111 923b1523 Iustin Pop
3112 923b1523 Iustin Pop
3113 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
3114 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
3115 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
3116 a1f445d3 Iustin Pop

3117 a1f445d3 Iustin Pop
  """
3118 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
3119 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
3120 b9bddb6b Iustin Pop
  shared_secret = lu.cfg.GenerateDRBDSecret()
3121 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3122 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
3123 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3124 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
3125 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
3126 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
3127 f9518d38 Iustin Pop
                                      p_minor, s_minor,
3128 f9518d38 Iustin Pop
                                      shared_secret),
3129 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
3130 a1f445d3 Iustin Pop
                          iv_name=iv_name)
3131 a1f445d3 Iustin Pop
  return drbd_dev
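
# Illustrative note (not part of the original cmdlib.py): the logical_id of
# the DRBD8 disk built above is a 6-tuple; the values below are made-up
# placeholders that only document the field order.
_EXAMPLE_DRBD8_LOGICAL_ID = (
  "node1.example.com",    # primary node
  "node2.example.com",    # secondary node
  11000,                  # TCP port from lu.cfg.AllocatePort()
  0,                      # DRBD minor on the primary
  1,                      # DRBD minor on the secondary
  "0123456789abcdef",     # shared secret from lu.cfg.GenerateDRBDSecret()
  )
assert len(_EXAMPLE_DRBD8_LOGICAL_ID) == 6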
3132 a1f445d3 Iustin Pop
3133 7c0d6283 Michael Hanselmann
3134 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
3135 a8083063 Iustin Pop
                          instance_name, primary_node,
3136 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
3137 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
3138 e2a65344 Iustin Pop
                          base_index):
3139 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
3140 a8083063 Iustin Pop

3141 a8083063 Iustin Pop
  """
3142 a8083063 Iustin Pop
  #TODO: compute space requirements
3143 a8083063 Iustin Pop
3144 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
3145 08db7c5c Iustin Pop
  disk_count = len(disk_info)
3146 08db7c5c Iustin Pop
  disks = []
3147 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
3148 08db7c5c Iustin Pop
    pass
3149 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
3150 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
3151 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
3152 923b1523 Iustin Pop
3153 08db7c5c Iustin Pop
    names = _GenerateUniqueNames(lu, [".disk%d" % i
3154 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
3155 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
3156 e2a65344 Iustin Pop
      disk_index = idx + base_index
3157 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
3158 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
3159 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index)
3160 08db7c5c Iustin Pop
      disks.append(disk_dev)
3161 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
3162 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
3163 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
3164 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
3165 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
3166 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
3167 08db7c5c Iustin Pop
3168 08db7c5c Iustin Pop
    names = _GenerateUniqueNames(lu,
3169 08db7c5c Iustin Pop
                                 [".disk%d_%s" % (i, s)
3170 08db7c5c Iustin Pop
                                  for i in range(disk_count)
3171 08db7c5c Iustin Pop
                                  for s in ("data", "meta")
3172 08db7c5c Iustin Pop
                                  ])
3173 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
3174 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
3175 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
3176 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
3177 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
3178 08db7c5c Iustin Pop
      disks.append(disk_dev)
3179 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
3180 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
3181 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
3182 0f1a06e3 Manuel Franceschini
3183 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
3184 08db7c5c Iustin Pop
3185 08db7c5c Iustin Pop
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
3186 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
3187 08db7c5c Iustin Pop
                              logical_id=(file_driver,
3188 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
3189 08db7c5c Iustin Pop
                                                         idx)))
3190 08db7c5c Iustin Pop
      disks.append(disk_dev)
3191 a8083063 Iustin Pop
  else:
3192 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3193 a8083063 Iustin Pop
  return disks
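
# Illustrative sketch (not part of the original cmdlib.py): disk_info is a
# list of {"size": ..., "mode": ...} dicts as built by
# LUCreateInstance.ExpandNames further below, and the iv_name numbering
# simply continues from base_index. The values here are made up.
_example_disk_info = [{"size": 1024, "mode": "rw"},
                      {"size": 2048, "mode": "rw"}]
_example_base_index = 0
_example_iv_names = ["disk/%d" % (_example_base_index + idx)
                     for idx in range(len(_example_disk_info))]
assert _example_iv_names == ["disk/0", "disk/1"]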
3194 a8083063 Iustin Pop
3195 a8083063 Iustin Pop
3196 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3197 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3198 3ecf6786 Iustin Pop

3199 3ecf6786 Iustin Pop
  """
3200 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3201 a0c3fea1 Michael Hanselmann
3202 a0c3fea1 Michael Hanselmann
3203 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
3204 a8083063 Iustin Pop
  """Create all disks for an instance.
3205 a8083063 Iustin Pop

3206 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
3207 a8083063 Iustin Pop

3208 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3209 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3210 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3211 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
3212 e4376078 Iustin Pop
  @rtype: boolean
3213 e4376078 Iustin Pop
  @return: the success of the creation
3214 a8083063 Iustin Pop

3215 a8083063 Iustin Pop
  """
3216 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
3217 a0c3fea1 Michael Hanselmann
3218 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3219 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3220 72737a7f Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(instance.primary_node,
3221 72737a7f Iustin Pop
                                                 file_storage_dir)
3222 0f1a06e3 Manuel Franceschini
3223 0f1a06e3 Manuel Franceschini
    if not result:
3224 9a4f63d1 Iustin Pop
      logging.error("Could not connect to node '%s'", instance.primary_node)
3225 0f1a06e3 Manuel Franceschini
      return False
3226 0f1a06e3 Manuel Franceschini
3227 0f1a06e3 Manuel Franceschini
    if not result[0]:
3228 9a4f63d1 Iustin Pop
      logging.error("Failed to create directory '%s'", file_storage_dir)
3229 0f1a06e3 Manuel Franceschini
      return False
3230 0f1a06e3 Manuel Franceschini
3231 a8083063 Iustin Pop
  for device in instance.disks:
3232 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
3233 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
3234 a8083063 Iustin Pop
    #HARDCODE
3235 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
3236 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(lu, secondary_node, instance,
3237 3f78eef2 Iustin Pop
                                        device, False, info):
3238 9a4f63d1 Iustin Pop
        logging.error("Failed to create volume %s (%s) on secondary node %s!",
3239 9a4f63d1 Iustin Pop
                      device.iv_name, device, secondary_node)
3240 a8083063 Iustin Pop
        return False
3241 a8083063 Iustin Pop
    #HARDCODE
3242 b9bddb6b Iustin Pop
    if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
3243 3f78eef2 Iustin Pop
                                    instance, device, info):
3244 9a4f63d1 Iustin Pop
      logging.error("Failed to create volume %s on primary!", device.iv_name)
3245 a8083063 Iustin Pop
      return False
3246 1c6e3627 Manuel Franceschini
3247 a8083063 Iustin Pop
  return True
3248 a8083063 Iustin Pop
3249 a8083063 Iustin Pop
3250 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
3251 a8083063 Iustin Pop
  """Remove all disks for an instance.
3252 a8083063 Iustin Pop

3253 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
3254 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
3255 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
3256 a8083063 Iustin Pop
  with `_CreateDisks()`).
3257 a8083063 Iustin Pop

3258 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3259 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3260 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3261 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
3262 e4376078 Iustin Pop
  @rtype: boolean
3263 e4376078 Iustin Pop
  @return: the success of the removal
3264 a8083063 Iustin Pop

3265 a8083063 Iustin Pop
  """
3266 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
3267 a8083063 Iustin Pop
3268 a8083063 Iustin Pop
  result = True
3269 a8083063 Iustin Pop
  for device in instance.disks:
3270 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
3271 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
3272 72737a7f Iustin Pop
      if not lu.rpc.call_blockdev_remove(node, disk):
3273 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not remove block device %s on node %s,"
3274 86d9d3bb Iustin Pop
                           " continuing anyway", device.iv_name, node)
3275 a8083063 Iustin Pop
        result = False
3276 0f1a06e3 Manuel Franceschini
3277 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3278 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3279 72737a7f Iustin Pop
    if not lu.rpc.call_file_storage_dir_remove(instance.primary_node,
3280 72737a7f Iustin Pop
                                               file_storage_dir):
3281 9a4f63d1 Iustin Pop
      logging.error("Could not remove directory '%s'", file_storage_dir)
3282 0f1a06e3 Manuel Franceschini
      result = False
3283 0f1a06e3 Manuel Franceschini
3284 a8083063 Iustin Pop
  return result
3285 a8083063 Iustin Pop
3286 a8083063 Iustin Pop
3287 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
3288 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
3289 e2fe6369 Iustin Pop

3290 e2fe6369 Iustin Pop
  """
3291 e2fe6369 Iustin Pop
  # Required free disk space as a function of the disk template and disk sizes
3292 e2fe6369 Iustin Pop
  req_size_dict = {
3293 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
3294 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
3295 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
3296 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
3297 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
3298 e2fe6369 Iustin Pop
  }
3299 e2fe6369 Iustin Pop
3300 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
3301 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
3302 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
3303 e2fe6369 Iustin Pop
3304 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
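
# Illustrative worked example (not part of the original cmdlib.py): for the
# drbd template every disk needs an extra 128 MB of DRBD metadata on top of
# its data size, so two disks of 1024 and 2048 MB need 3328 MB in the VG.
_example_disks = [{"size": 1024}, {"size": 2048}]
assert sum(d["size"] + 128 for d in _example_disks) == 3328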
3305 e2fe6369 Iustin Pop
3306 e2fe6369 Iustin Pop
3307 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
3308 74409b12 Iustin Pop
  """Hypervisor parameter validation.
3309 74409b12 Iustin Pop

3310 74409b12 Iustin Pop
  This function abstracts the hypervisor parameter validation to be
3311 74409b12 Iustin Pop
  used in both instance create and instance modify.
3312 74409b12 Iustin Pop

3313 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
3314 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
3315 74409b12 Iustin Pop
  @type nodenames: list
3316 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
3317 74409b12 Iustin Pop
  @type hvname: string
3318 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
3319 74409b12 Iustin Pop
  @type hvparams: dict
3320 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
3321 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
3322 74409b12 Iustin Pop

3323 74409b12 Iustin Pop
  """
3324 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
3325 74409b12 Iustin Pop
                                                  hvname,
3326 74409b12 Iustin Pop
                                                  hvparams)
3327 74409b12 Iustin Pop
  for node in nodenames:
3328 74409b12 Iustin Pop
    info = hvinfo.get(node, None)
3329 74409b12 Iustin Pop
    if not info or not isinstance(info, (tuple, list)):
3330 74409b12 Iustin Pop
      raise errors.OpPrereqError("Cannot get current information"
3331 74409b12 Iustin Pop
                                 " from node '%s' (%s)" % (node, info))
3332 74409b12 Iustin Pop
    if not info[0]:
3333 74409b12 Iustin Pop
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
3334 74409b12 Iustin Pop
                                 " %s" % info[1])
3335 74409b12 Iustin Pop
3336 74409b12 Iustin Pop
3337 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
3338 a8083063 Iustin Pop
  """Create an instance.
3339 a8083063 Iustin Pop

3340 a8083063 Iustin Pop
  """
3341 a8083063 Iustin Pop
  HPATH = "instance-add"
3342 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3343 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
3344 08db7c5c Iustin Pop
              "mode", "start",
3345 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
3346 338e51e8 Iustin Pop
              "hvparams", "beparams"]
3347 7baf741d Guido Trotter
  REQ_BGL = False
3348 7baf741d Guido Trotter
3349 7baf741d Guido Trotter
  def _ExpandNode(self, node):
3350 7baf741d Guido Trotter
    """Expands and checks one node name.
3351 7baf741d Guido Trotter

3352 7baf741d Guido Trotter
    """
3353 7baf741d Guido Trotter
    node_full = self.cfg.ExpandNodeName(node)
3354 7baf741d Guido Trotter
    if node_full is None:
3355 7baf741d Guido Trotter
      raise errors.OpPrereqError("Unknown node %s" % node)
3356 7baf741d Guido Trotter
    return node_full
3357 7baf741d Guido Trotter
3358 7baf741d Guido Trotter
  def ExpandNames(self):
3359 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
3360 7baf741d Guido Trotter

3361 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
3362 7baf741d Guido Trotter

3363 7baf741d Guido Trotter
    """
3364 7baf741d Guido Trotter
    self.needed_locks = {}
3365 7baf741d Guido Trotter
3366 7baf741d Guido Trotter
    # set optional parameters to none if they don't exist
3367 6785674e Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
3368 7baf741d Guido Trotter
      if not hasattr(self.op, attr):
3369 7baf741d Guido Trotter
        setattr(self.op, attr, None)
3370 7baf741d Guido Trotter
3371 4b2f38dd Iustin Pop
    # cheap checks, mostly valid constants given
3372 4b2f38dd Iustin Pop
3373 7baf741d Guido Trotter
    # verify creation mode
3374 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
3375 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
3376 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3377 7baf741d Guido Trotter
                                 self.op.mode)
3378 4b2f38dd Iustin Pop
3379 7baf741d Guido Trotter
    # disk template and mirror node verification
3380 7baf741d Guido Trotter
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3381 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid disk template name")
3382 7baf741d Guido Trotter
3383 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
3384 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
3385 4b2f38dd Iustin Pop
3386 8705eb96 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
3387 8705eb96 Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
3388 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
3389 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
3390 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
3391 4b2f38dd Iustin Pop
                                  ",".join(enabled_hvs)))
3392 4b2f38dd Iustin Pop
3393 6785674e Iustin Pop
    # check hypervisor parameter syntax (locally)
3394 6785674e Iustin Pop
3395 8705eb96 Iustin Pop
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
3396 8705eb96 Iustin Pop
                                  self.op.hvparams)
3397 6785674e Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
3398 8705eb96 Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
3399 6785674e Iustin Pop
3400 338e51e8 Iustin Pop
    # fill and remember the beparams dict
3401 338e51e8 Iustin Pop
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
3402 338e51e8 Iustin Pop
                                    self.op.beparams)
3403 338e51e8 Iustin Pop
3404 7baf741d Guido Trotter
    #### instance parameters check
3405 7baf741d Guido Trotter
3406 7baf741d Guido Trotter
    # instance name verification
3407 7baf741d Guido Trotter
    hostname1 = utils.HostInfo(self.op.instance_name)
3408 7baf741d Guido Trotter
    self.op.instance_name = instance_name = hostname1.name
3409 7baf741d Guido Trotter
3410 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
3411 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
3412 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
3413 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3414 7baf741d Guido Trotter
                                 instance_name)
3415 7baf741d Guido Trotter
3416 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
3417 7baf741d Guido Trotter
3418 08db7c5c Iustin Pop
    # NIC buildup
3419 08db7c5c Iustin Pop
    self.nics = []
3420 08db7c5c Iustin Pop
    for nic in self.op.nics:
3421 08db7c5c Iustin Pop
      # ip validity checks
3422 08db7c5c Iustin Pop
      ip = nic.get("ip", None)
3423 08db7c5c Iustin Pop
      if ip is None or ip.lower() == "none":
3424 08db7c5c Iustin Pop
        nic_ip = None
3425 08db7c5c Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
3426 08db7c5c Iustin Pop
        nic_ip = hostname1.ip
3427 08db7c5c Iustin Pop
      else:
3428 08db7c5c Iustin Pop
        if not utils.IsValidIP(ip):
3429 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
3430 08db7c5c Iustin Pop
                                     " like a valid IP" % ip)
3431 08db7c5c Iustin Pop
        nic_ip = ip
3432 08db7c5c Iustin Pop
3433 08db7c5c Iustin Pop
      # MAC address verification
3434 08db7c5c Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
3435 08db7c5c Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
3436 08db7c5c Iustin Pop
        if not utils.IsValidMac(mac.lower()):
3437 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
3438 08db7c5c Iustin Pop
                                     mac)
3439 08db7c5c Iustin Pop
      # bridge verification
3440 08db7c5c Iustin Pop
      bridge = nic.get("bridge", self.cfg.GetDefBridge())
3441 08db7c5c Iustin Pop
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))
3442 08db7c5c Iustin Pop
3443 08db7c5c Iustin Pop
    # disk checks/pre-build
3444 08db7c5c Iustin Pop
    self.disks = []
3445 08db7c5c Iustin Pop
    for disk in self.op.disks:
3446 08db7c5c Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
3447 08db7c5c Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
3448 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
3449 08db7c5c Iustin Pop
                                   mode)
3450 08db7c5c Iustin Pop
      size = disk.get("size", None)
3451 08db7c5c Iustin Pop
      if size is None:
3452 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Missing disk size")
3453 08db7c5c Iustin Pop
      try:
3454 08db7c5c Iustin Pop
        size = int(size)
3455 08db7c5c Iustin Pop
      except ValueError:
3456 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
3457 08db7c5c Iustin Pop
      self.disks.append({"size": size, "mode": mode})
3458 08db7c5c Iustin Pop
3459 7baf741d Guido Trotter
    # used in CheckPrereq for ip ping check
3460 7baf741d Guido Trotter
    self.check_ip = hostname1.ip
3461 7baf741d Guido Trotter
3462 7baf741d Guido Trotter
    # file storage checks
3463 7baf741d Guido Trotter
    if (self.op.file_driver and
3464 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
3465 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3466 7baf741d Guido Trotter
                                 self.op.file_driver)
3467 7baf741d Guido Trotter
3468 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3469 7baf741d Guido Trotter
      raise errors.OpPrereqError("File storage directory path not absolute")
3470 7baf741d Guido Trotter
3471 7baf741d Guido Trotter
    ### Node/iallocator related checks
3472 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3473 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3474 7baf741d Guido Trotter
                                 " node must be given")
3475 7baf741d Guido Trotter
3476 7baf741d Guido Trotter
    if self.op.iallocator:
3477 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3478 7baf741d Guido Trotter
    else:
3479 7baf741d Guido Trotter
      self.op.pnode = self._ExpandNode(self.op.pnode)
3480 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
3481 7baf741d Guido Trotter
      if self.op.snode is not None:
3482 7baf741d Guido Trotter
        self.op.snode = self._ExpandNode(self.op.snode)
3483 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
3484 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
3485 7baf741d Guido Trotter
3486 7baf741d Guido Trotter
    # in case of import lock the source node too
3487 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
3488 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
3489 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
3490 7baf741d Guido Trotter
3491 7baf741d Guido Trotter
      if src_node is None or src_path is None:
3492 7baf741d Guido Trotter
        raise errors.OpPrereqError("Importing an instance requires source"
3493 7baf741d Guido Trotter
                                   " node and path options")
3494 7baf741d Guido Trotter
3495 7baf741d Guido Trotter
      if not os.path.isabs(src_path):
3496 7baf741d Guido Trotter
        raise errors.OpPrereqError("The source path must be absolute")
3497 7baf741d Guido Trotter
3498 7baf741d Guido Trotter
      self.op.src_node = src_node = self._ExpandNode(src_node)
3499 7baf741d Guido Trotter
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
3500 7baf741d Guido Trotter
        self.needed_locks[locking.LEVEL_NODE].append(src_node)
3501 7baf741d Guido Trotter
3502 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
3503 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
3504 7baf741d Guido Trotter
        raise errors.OpPrereqError("No guest OS specified")
3505 a8083063 Iustin Pop
3506 538475ca Iustin Pop
  def _RunAllocator(self):
3507 538475ca Iustin Pop
    """Run the allocator based on input opcode.
3508 538475ca Iustin Pop

3509 538475ca Iustin Pop
    """
3510 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
3511 72737a7f Iustin Pop
    ial = IAllocator(self,
3512 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
3513 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
3514 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
3515 d1c2dd75 Iustin Pop
                     tags=[],
3516 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
3517 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
3518 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
3519 08db7c5c Iustin Pop
                     disks=self.disks,
3520 d1c2dd75 Iustin Pop
                     nics=nics,
3521 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
3522 29859cb7 Iustin Pop
                     )
3523 d1c2dd75 Iustin Pop
3524 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
3525 d1c2dd75 Iustin Pop
3526 d1c2dd75 Iustin Pop
    if not ial.success:
3527 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3528 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3529 d1c2dd75 Iustin Pop
                                                           ial.info))
3530 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3531 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3532 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
3533 97abc79f Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
3534 1ce4bbe3 René Nussbaumer
                                  ial.required_nodes))
3535 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
3536 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
3537 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
3538 86d9d3bb Iustin Pop
                 ", ".join(ial.nodes))
3539 27579978 Iustin Pop
    if ial.required_nodes == 2:
3540 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
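
  # Illustrative note (not part of the original cmdlib.py): for a DRBD8
  # request the allocator must return exactly two node names
  # (ial.required_nodes == 2); the first becomes the primary and the second
  # the secondary, e.g. with made-up names:
  #
  #   ial.nodes == ["node3.example.com", "node7.example.com"]
  #   self.op.pnode == "node3.example.com"
  #   self.op.snode == "node7.example.com"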
3541 538475ca Iustin Pop
3542 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3543 a8083063 Iustin Pop
    """Build hooks env.
3544 a8083063 Iustin Pop

3545 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3546 a8083063 Iustin Pop

3547 a8083063 Iustin Pop
    """
3548 a8083063 Iustin Pop
    env = {
3549 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3550 08db7c5c Iustin Pop
      "INSTANCE_DISK_SIZE": ",".join(str(d["size"]) for d in self.disks),
3551 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
3552 a8083063 Iustin Pop
      }
3553 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3554 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
3555 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
3556 09acf207 Guido Trotter
      env["INSTANCE_SRC_IMAGES"] = self.src_images
3557 396e1b78 Michael Hanselmann
3558 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3559 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
3560 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
3561 396e1b78 Michael Hanselmann
      status=self.instance_status,
3562 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
3563 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
3564 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
3565 08db7c5c Iustin Pop
      nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
3566 396e1b78 Michael Hanselmann
    ))
3567 a8083063 Iustin Pop
3568 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
3569 a8083063 Iustin Pop
          self.secondaries)
3570 a8083063 Iustin Pop
    return env, nl, nl
3571 a8083063 Iustin Pop
3572 a8083063 Iustin Pop
3573 a8083063 Iustin Pop
  def CheckPrereq(self):
3574 a8083063 Iustin Pop
    """Check prerequisites.
3575 a8083063 Iustin Pop

3576 a8083063 Iustin Pop
    """
3577 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3578 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3579 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3580 eedc99de Manuel Franceschini
                                 " instances")
3581 eedc99de Manuel Franceschini
3582 e69d05fd Iustin Pop
3583 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3584 7baf741d Guido Trotter
      src_node = self.op.src_node
3585 7baf741d Guido Trotter
      src_path = self.op.src_path
3586 a8083063 Iustin Pop
3587 72737a7f Iustin Pop
      export_info = self.rpc.call_export_info(src_node, src_path)
3588 a8083063 Iustin Pop
3589 a8083063 Iustin Pop
      if not export_info:
3590 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3591 a8083063 Iustin Pop
3592 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3593 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3594 a8083063 Iustin Pop
3595 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3596 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3597 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3598 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3599 a8083063 Iustin Pop
3600 09acf207 Guido Trotter
      # Check that the new instance doesn't have less disks than the export
3601 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
3602 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
3603 09acf207 Guido Trotter
      if instance_disks < export_disks:
3604 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
3605 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
3606 09acf207 Guido Trotter
                                   (instance_disks, export_disks))
3607 a8083063 Iustin Pop
3608 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3609 09acf207 Guido Trotter
      disk_images = []
3610 09acf207 Guido Trotter
      for idx in range(export_disks):
3611 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
3612 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
3613 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
3614 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
3615 09acf207 Guido Trotter
          image = os.path.join(src_path, export_name)
3616 09acf207 Guido Trotter
          disk_images.append(image)
3617 09acf207 Guido Trotter
        else:
3618 09acf207 Guido Trotter
          disk_images.append(False)
3619 09acf207 Guido Trotter
3620 09acf207 Guido Trotter
      self.src_images = disk_images
3621 901a65c1 Iustin Pop
3622 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
3623 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
3624 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
3625 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
3626 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
3627 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
3628 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
3629 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
3630 bc89efc3 Guido Trotter
3631 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
3632 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3633 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3634 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3635 901a65c1 Iustin Pop
3636 901a65c1 Iustin Pop
    if self.op.ip_check:
3637 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
3638 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3639 7b3a8fb5 Iustin Pop
                                   (self.check_ip, self.op.instance_name))
3640 901a65c1 Iustin Pop
3641 538475ca Iustin Pop
    #### allocator run
3642 538475ca Iustin Pop
3643 538475ca Iustin Pop
    if self.op.iallocator is not None:
3644 538475ca Iustin Pop
      self._RunAllocator()
3645 0f1a06e3 Manuel Franceschini
3646 901a65c1 Iustin Pop
    #### node related checks
3647 901a65c1 Iustin Pop
3648 901a65c1 Iustin Pop
    # check primary node
3649 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
3650 7baf741d Guido Trotter
    assert self.pnode is not None, \
3651 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
3652 901a65c1 Iustin Pop
    self.secondaries = []
3653 901a65c1 Iustin Pop
3654 901a65c1 Iustin Pop
    # mirror node verification
3655 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3656 7baf741d Guido Trotter
      if self.op.snode is None:
3657 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3658 3ecf6786 Iustin Pop
                                   " a mirror node")
3659 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
3660 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3661 3ecf6786 Iustin Pop
                                   " the primary node.")
3662 7baf741d Guido Trotter
      self.secondaries.append(self.op.snode)
3663 a8083063 Iustin Pop
3664 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
3665 6785674e Iustin Pop
3666 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3667 08db7c5c Iustin Pop
                                self.disks)
3668 ed1ebc60 Guido Trotter
3669 8d75db10 Iustin Pop
    # Check lv size requirements
3670 8d75db10 Iustin Pop
    if req_size is not None:
3671 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
3672 72737a7f Iustin Pop
                                         self.op.hypervisor)
3673 8d75db10 Iustin Pop
      for node in nodenames:
3674 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3675 8d75db10 Iustin Pop
        if not info:
3676 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3677 3e91897b Iustin Pop
                                     " from node '%s'" % node)
3678 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3679 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3680 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3681 8d75db10 Iustin Pop
                                     " node %s" % node)
3682 8d75db10 Iustin Pop
        if req_size > vg_free:
3683 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3684 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3685 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3686 ed1ebc60 Guido Trotter
3687 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
3688 6785674e Iustin Pop
3689 a8083063 Iustin Pop
    # os verification
3690 72737a7f Iustin Pop
    os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
3691 dfa96ded Guido Trotter
    if not os_obj:
3692 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3693 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3694 a8083063 Iustin Pop
3695 901a65c1 Iustin Pop
    # bridge check on primary node
3696 08db7c5c Iustin Pop
    bridges = [n.bridge for n in self.nics]
3697 08db7c5c Iustin Pop
    if not self.rpc.call_bridges_exist(self.pnode.name, bridges):
3698 08db7c5c Iustin Pop
      raise errors.OpPrereqError("one of the target bridges '%s' does not"
3699 08db7c5c Iustin Pop
                                 " exist on"
3700 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3701 08db7c5c Iustin Pop
                                 (",".join(bridges), pnode.name))
3702 a8083063 Iustin Pop
3703 49ce1563 Iustin Pop
    # memory check on primary node
3704 49ce1563 Iustin Pop
    if self.op.start:
3705 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
3706 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3707 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
3708 338e51e8 Iustin Pop
                           self.op.hypervisor)
3709 49ce1563 Iustin Pop
3710 a8083063 Iustin Pop
    if self.op.start:
3711 a8083063 Iustin Pop
      self.instance_status = 'up'
3712 a8083063 Iustin Pop
    else:
3713 a8083063 Iustin Pop
      self.instance_status = 'down'
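
  # Illustrative note (not part of the original cmdlib.py): the export_info
  # object read above is a ConfigParser-style file; the keys used in this
  # method suggest a layout roughly like the made-up sample below, with the
  # real section names coming from constants.INISECT_EXP and
  # constants.INISECT_INS.
  #
  #   [<INISECT_EXP>]
  #   version = 0
  #   os = debian-etch
  #
  #   [<INISECT_INS>]
  #   name = web1.example.com
  #   disk_count = 1
  #   disk0_dump = disk0.dump
  #   nic_count = 1
  #   nic0_mac = aa:00:00:12:34:56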
3714 a8083063 Iustin Pop
3715 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3716 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
3717 a8083063 Iustin Pop

3718 a8083063 Iustin Pop
    """
3719 a8083063 Iustin Pop
    instance = self.op.instance_name
3720 a8083063 Iustin Pop
    pnode_name = self.pnode.name
3721 a8083063 Iustin Pop
3722 08db7c5c Iustin Pop
    for nic in self.nics:
3723 08db7c5c Iustin Pop
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
3724 08db7c5c Iustin Pop
        nic.mac = self.cfg.GenerateMAC()
3725 a8083063 Iustin Pop
3726 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
3727 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
3728 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
3729 2a6469d5 Alexander Schreiber
    else:
3730 2a6469d5 Alexander Schreiber
      network_port = None
3731 58acb49d Alexander Schreiber
3732 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
3733 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3734 31a853d2 Iustin Pop
3735 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
3736 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
3737 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
3738 2c313123 Manuel Franceschini
    else:
3739 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
3740 2c313123 Manuel Franceschini
3741 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
3742 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
3743 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
3744 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
3745 0f1a06e3 Manuel Franceschini
3746 0f1a06e3 Manuel Franceschini
3747 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
3748 a8083063 Iustin Pop
                                  self.op.disk_template,
3749 a8083063 Iustin Pop
                                  instance, pnode_name,
3750 08db7c5c Iustin Pop
                                  self.secondaries,
3751 08db7c5c Iustin Pop
                                  self.disks,
3752 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
3753 e2a65344 Iustin Pop
                                  self.op.file_driver,
3754 e2a65344 Iustin Pop
                                  0)
3755 a8083063 Iustin Pop
3756 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3757 a8083063 Iustin Pop
                            primary_node=pnode_name,
3758 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
3759 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
3760 a8083063 Iustin Pop
                            status=self.instance_status,
3761 58acb49d Alexander Schreiber
                            network_port=network_port,
3762 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
3763 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
3764 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
3765 a8083063 Iustin Pop
                            )
3766 a8083063 Iustin Pop
3767 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
3768 b9bddb6b Iustin Pop
    if not _CreateDisks(self, iobj):
3769 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
3770 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance)
3771 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
3772 a8083063 Iustin Pop
3773 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
3774 a8083063 Iustin Pop
3775 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3776 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
3777 7baf741d Guido Trotter
    # added the instance to the config
3778 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
3779 a1578d63 Iustin Pop
    # Remove the temp. assignments for the instance's drbds
3780 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance)
3781 e36e96b4 Guido Trotter
    # Unlock all the nodes
3782 e36e96b4 Guido Trotter
    self.context.glm.release(locking.LEVEL_NODE)
3783 e36e96b4 Guido Trotter
    del self.acquired_locks[locking.LEVEL_NODE]
3784 a8083063 Iustin Pop
3785 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3786 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
3787 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3788 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3789 a8083063 Iustin Pop
      time.sleep(15)
3790 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3791 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
3792 a8083063 Iustin Pop
    else:
3793 a8083063 Iustin Pop
      disk_abort = False
3794 a8083063 Iustin Pop
3795 a8083063 Iustin Pop
    if disk_abort:
3796 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
3797 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3798 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
3799 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
3800 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3801 3ecf6786 Iustin Pop
                               " this instance")
3802 a8083063 Iustin Pop
3803 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3804 a8083063 Iustin Pop
                (instance, pnode_name))
3805 a8083063 Iustin Pop
3806 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3807 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3808 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3809 d15a9ad3 Guido Trotter
        if not self.rpc.call_instance_os_add(pnode_name, iobj):
3810 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3811 3ecf6786 Iustin Pop
                                   " on node %s" %
3812 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3813 a8083063 Iustin Pop
3814 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3815 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3816 a8083063 Iustin Pop
        src_node = self.op.src_node
3817 09acf207 Guido Trotter
        src_images = self.src_images
3818 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
3819 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
3820 09acf207 Guido Trotter
                                                         src_node, src_images,
3821 6c0af70e Guido Trotter
                                                         cluster_name)
3822 09acf207 Guido Trotter
        for idx, result in enumerate(import_result):
3823 09acf207 Guido Trotter
          if not result:
3824 09acf207 Guido Trotter
            self.LogWarning("Could not image %s for on instance %s, disk %d,"
3825 09acf207 Guido Trotter
                            " on node %s" % (src_images[idx], instance, idx,
3826 09acf207 Guido Trotter
                                             pnode_name))
3827 a8083063 Iustin Pop
      else:
3828 a8083063 Iustin Pop
        # also checked in the prereq part
3829 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3830 3ecf6786 Iustin Pop
                                     % self.op.mode)
3831 a8083063 Iustin Pop
3832 a8083063 Iustin Pop
    if self.op.start:
3833 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
3834 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3835 72737a7f Iustin Pop
      if not self.rpc.call_instance_start(pnode_name, iobj, None):
3836 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3837 a8083063 Iustin Pop
3838 a8083063 Iustin Pop
3839 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3840 a8083063 Iustin Pop
  """Connect to an instance's console.
3841 a8083063 Iustin Pop

3842 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3843 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3844 a8083063 Iustin Pop
  console.
3845 a8083063 Iustin Pop

3846 a8083063 Iustin Pop
  """
3847 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3848 8659b73e Guido Trotter
  REQ_BGL = False
3849 8659b73e Guido Trotter
3850 8659b73e Guido Trotter
  def ExpandNames(self):
3851 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
3852 a8083063 Iustin Pop
3853 a8083063 Iustin Pop
  def CheckPrereq(self):
3854 a8083063 Iustin Pop
    """Check prerequisites.
3855 a8083063 Iustin Pop

3856 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3857 a8083063 Iustin Pop

3858 a8083063 Iustin Pop
    """
3859 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3860 8659b73e Guido Trotter
    assert self.instance is not None, \
3861 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3862 a8083063 Iustin Pop
3863 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3864 a8083063 Iustin Pop
    """Connect to the console of an instance
3865 a8083063 Iustin Pop

3866 a8083063 Iustin Pop
    """
3867 a8083063 Iustin Pop
    instance = self.instance
3868 a8083063 Iustin Pop
    node = instance.primary_node
3869 a8083063 Iustin Pop
3870 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
3871 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
3872 a8083063 Iustin Pop
    if node_insts is False:
3873 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3874 a8083063 Iustin Pop
3875 a8083063 Iustin Pop
    if instance.name not in node_insts:
3876 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3877 a8083063 Iustin Pop
3878 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
3879 a8083063 Iustin Pop
3880 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
3881 30989e69 Alexander Schreiber
    console_cmd = hyper.GetShellCommandForConsole(instance)
3882 b047857b Michael Hanselmann
3883 82122173 Iustin Pop
    # build ssh cmdline
3884 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
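
# A minimal sketch of how a caller could run the command returned above,
# assuming (hypothetically) that it is an argv-style list such as
# ['ssh', '-t', 'node1.example.com', 'xm console instance1']; the actual
# wiring to the user's terminal is done by the command-line client.
import subprocess

def run_console_cmd_sketch(argv):
  """Hand the terminal over to the console command and return its exit code."""
  return subprocess.call(argv)
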
3885 a8083063 Iustin Pop
3886 a8083063 Iustin Pop
3887 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3888 a8083063 Iustin Pop
  """Replace the disks of an instance.
3889 a8083063 Iustin Pop

3890 a8083063 Iustin Pop
  """
3891 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3892 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3893 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3894 efd990e4 Guido Trotter
  REQ_BGL = False
3895 efd990e4 Guido Trotter
3896 efd990e4 Guido Trotter
  def ExpandNames(self):
3897 efd990e4 Guido Trotter
    self._ExpandAndLockInstance()
3898 efd990e4 Guido Trotter
3899 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
3900 efd990e4 Guido Trotter
      self.op.remote_node = None
3901 efd990e4 Guido Trotter
3902 efd990e4 Guido Trotter
    ia_name = getattr(self.op, "iallocator", None)
3903 efd990e4 Guido Trotter
    if ia_name is not None:
3904 efd990e4 Guido Trotter
      if self.op.remote_node is not None:
3905 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Give either the iallocator or the new"
3906 efd990e4 Guido Trotter
                                   " secondary, not both")
3907 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3908 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
3909 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
3910 efd990e4 Guido Trotter
      if remote_node is None:
3911 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
3912 efd990e4 Guido Trotter
                                   self.op.remote_node)
3913 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
3914 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
3915 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3916 efd990e4 Guido Trotter
    else:
3917 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
3918 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3919 efd990e4 Guido Trotter
3920 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
3921 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
3922 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
3923 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
3924 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
3925 efd990e4 Guido Trotter
      self._LockInstancesNodes()
3926 a8083063 Iustin Pop
3927 b6e82a65 Iustin Pop
  def _RunAllocator(self):
3928 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
3929 b6e82a65 Iustin Pop

3930 b6e82a65 Iustin Pop
    """
3931 72737a7f Iustin Pop
    ial = IAllocator(self,
3932 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
3933 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
3934 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
3935 b6e82a65 Iustin Pop
3936 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
3937 b6e82a65 Iustin Pop
3938 b6e82a65 Iustin Pop
    if not ial.success:
3939 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3940 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3941 b6e82a65 Iustin Pop
                                                           ial.info))
3942 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3943 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3944 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
3945 b6e82a65 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
3946 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
3947 86d9d3bb Iustin Pop
    self.LogInfo("Selected new secondary for the instance: %s",
3948 86d9d3bb Iustin Pop
                 self.op.remote_node)
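
# A minimal standalone sketch of the result checks above, assuming a
# hypothetical allocator reply in plain-dict form; the real IAllocator class
# also builds the request and runs the external allocator script.
def check_alloc_reply_sketch(reply, required_nodes=1):
  """Return the chosen node name or raise ValueError on a bad reply."""
  if not reply.get("success"):
    raise ValueError("allocator failed: %s" % reply.get("info"))
  nodes = reply.get("nodes", [])
  if len(nodes) != required_nodes:
    raise ValueError("allocator returned %d node(s), %d required" %
                     (len(nodes), required_nodes))
  return nodes[0]

# e.g. check_alloc_reply_sketch({"success": True, "nodes": ["node3"]}) -> 'node3'
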
3949 b6e82a65 Iustin Pop
3950 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3951 a8083063 Iustin Pop
    """Build hooks env.
3952 a8083063 Iustin Pop

3953 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3954 a8083063 Iustin Pop

3955 a8083063 Iustin Pop
    """
3956 a8083063 Iustin Pop
    env = {
3957 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
3958 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3959 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3960 a8083063 Iustin Pop
      }
3961 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3962 0834c866 Iustin Pop
    nl = [
3963 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
3964 0834c866 Iustin Pop
      self.instance.primary_node,
3965 0834c866 Iustin Pop
      ]
3966 0834c866 Iustin Pop
    if self.op.remote_node is not None:
3967 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
3968 a8083063 Iustin Pop
    return env, nl, nl
3969 a8083063 Iustin Pop
3970 a8083063 Iustin Pop
  def CheckPrereq(self):
3971 a8083063 Iustin Pop
    """Check prerequisites.
3972 a8083063 Iustin Pop

3973 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3974 a8083063 Iustin Pop

3975 a8083063 Iustin Pop
    """
3976 efd990e4 Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3977 efd990e4 Guido Trotter
    assert instance is not None, \
3978 efd990e4 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3979 a8083063 Iustin Pop
    self.instance = instance
3980 a8083063 Iustin Pop
3981 a9e0c397 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3982 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3983 a9e0c397 Iustin Pop
                                 " network mirrored.")
3984 a8083063 Iustin Pop
3985 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3986 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3987 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3988 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3989 a8083063 Iustin Pop
3990 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
3991 a9e0c397 Iustin Pop
3992 b6e82a65 Iustin Pop
    ia_name = getattr(self.op, "iallocator", None)
3993 b6e82a65 Iustin Pop
    if ia_name is not None:
3994 de8c7666 Guido Trotter
      self._RunAllocator()
3995 b6e82a65 Iustin Pop
3996 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
3997 a9e0c397 Iustin Pop
    if remote_node is not None:
3998 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3999 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
4000 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
4001 a9e0c397 Iustin Pop
    else:
4002 a9e0c397 Iustin Pop
      self.remote_node_info = None
4003 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
4004 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
4005 3ecf6786 Iustin Pop
                                 " the instance.")
4006 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
4007 0834c866 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_SEC:
4008 0834c866 Iustin Pop
        # this is for DRBD8, where we can't execute the same mode of
4009 0834c866 Iustin Pop
        # replacement as for drbd7 (no different port allocated)
4010 0834c866 Iustin Pop
        raise errors.OpPrereqError("Same secondary given, cannot execute"
4011 0834c866 Iustin Pop
                                   " replacement")
4012 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
4013 7df43a76 Iustin Pop
      if (self.op.mode == constants.REPLACE_DISK_ALL and
4014 7df43a76 Iustin Pop
          remote_node is not None):
4015 7df43a76 Iustin Pop
        # switch to replace secondary mode
4016 7df43a76 Iustin Pop
        self.op.mode = constants.REPLACE_DISK_SEC
4017 7df43a76 Iustin Pop
4018 a9e0c397 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_ALL:
4019 12c3449a Michael Hanselmann
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
4020 a9e0c397 Iustin Pop
                                   " secondary disk replacement, not"
4021 a9e0c397 Iustin Pop
                                   " both at once")
4022 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_PRI:
4023 a9e0c397 Iustin Pop
        if remote_node is not None:
4024 12c3449a Michael Hanselmann
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
4025 a9e0c397 Iustin Pop
                                     " the secondary while doing a primary"
4026 a9e0c397 Iustin Pop
                                     " node disk replacement")
4027 a9e0c397 Iustin Pop
        self.tgt_node = instance.primary_node
4028 cff90b79 Iustin Pop
        self.oth_node = instance.secondary_nodes[0]
4029 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_SEC:
4030 a9e0c397 Iustin Pop
        self.new_node = remote_node # this can be None, in which case
4031 a9e0c397 Iustin Pop
                                    # we don't change the secondary
4032 a9e0c397 Iustin Pop
        self.tgt_node = instance.secondary_nodes[0]
4033 cff90b79 Iustin Pop
        self.oth_node = instance.primary_node
4034 a9e0c397 Iustin Pop
      else:
4035 a9e0c397 Iustin Pop
        raise errors.ProgrammerError("Unhandled disk replace mode")
4036 a9e0c397 Iustin Pop
4037 54155f52 Iustin Pop
    if not self.op.disks:
4038 54155f52 Iustin Pop
      self.op.disks = range(len(instance.disks))
4039 54155f52 Iustin Pop
4040 54155f52 Iustin Pop
    for disk_idx in self.op.disks:
4041 3e0cea06 Iustin Pop
      instance.FindDisk(disk_idx)
4042 a8083063 Iustin Pop
4043 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
4044 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
4045 a9e0c397 Iustin Pop

4046 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4047 e4376078 Iustin Pop

4048 e4376078 Iustin Pop
      1. for each disk to be replaced:
4049 e4376078 Iustin Pop

4050 e4376078 Iustin Pop
        1. create new LVs on the target node with unique names
4051 e4376078 Iustin Pop
        1. detach old LVs from the drbd device
4052 e4376078 Iustin Pop
        1. rename old LVs to name_replaced.<time_t>
4053 e4376078 Iustin Pop
        1. rename new LVs to old LVs
4054 e4376078 Iustin Pop
        1. attach the new LVs (with the old names now) to the drbd device
4055 e4376078 Iustin Pop

4056 e4376078 Iustin Pop
      1. wait for sync across all devices
4057 e4376078 Iustin Pop

4058 e4376078 Iustin Pop
      1. for each modified disk:
4059 e4376078 Iustin Pop

4060 e4376078 Iustin Pop
        1. remove old LVs (which have the name name_replaces.<time_t>)
4061 a9e0c397 Iustin Pop

4062 a9e0c397 Iustin Pop
    Failures are not very well handled.
4063 cff90b79 Iustin Pop

4064 a9e0c397 Iustin Pop
    """
4065 cff90b79 Iustin Pop
    steps_total = 6
4066 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4067 a9e0c397 Iustin Pop
    instance = self.instance
4068 a9e0c397 Iustin Pop
    iv_names = {}
4069 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4070 a9e0c397 Iustin Pop
    # start of work
4071 a9e0c397 Iustin Pop
    cfg = self.cfg
4072 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
4073 cff90b79 Iustin Pop
    oth_node = self.oth_node
4074 cff90b79 Iustin Pop
4075 cff90b79 Iustin Pop
    # Step: check device activation
4076 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4077 cff90b79 Iustin Pop
    info("checking volume groups")
4078 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
4079 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([oth_node, tgt_node])
4080 cff90b79 Iustin Pop
    if not results:
4081 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4082 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
4083 cff90b79 Iustin Pop
      res = results.get(node, False)
4084 cff90b79 Iustin Pop
      if not res or my_vg not in res:
4085 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4086 cff90b79 Iustin Pop
                                 (my_vg, node))
4087 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
4088 54155f52 Iustin Pop
      if idx not in self.op.disks:
4089 cff90b79 Iustin Pop
        continue
4090 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
4091 54155f52 Iustin Pop
        info("checking disk/%d on %s" % (idx, node))
4092 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
4093 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_find(node, dev):
4094 54155f52 Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s" %
4095 54155f52 Iustin Pop
                                   (idx, node))
4096 cff90b79 Iustin Pop
4097 cff90b79 Iustin Pop
    # Step: check other node consistency
4098 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4099 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
4100 54155f52 Iustin Pop
      if idx not in self.op.disks:
4101 cff90b79 Iustin Pop
        continue
4102 54155f52 Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, oth_node))
4103 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, oth_node,
4104 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
4105 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
4106 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
4107 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
4108 cff90b79 Iustin Pop
4109 cff90b79 Iustin Pop
    # Step: create new storage
4110 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4111 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
4112 54155f52 Iustin Pop
      if idx not in self.op.disks:
4113 a9e0c397 Iustin Pop
        continue
4114 a9e0c397 Iustin Pop
      size = dev.size
4115 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
4116 54155f52 Iustin Pop
      lv_names = [".disk%d_%s" % (idx, suf)
4117 54155f52 Iustin Pop
                  for suf in ["data", "meta"]]
4118 b9bddb6b Iustin Pop
      names = _GenerateUniqueNames(self, lv_names)
4119 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
4120 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
4121 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
4122 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
4123 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
4124 a9e0c397 Iustin Pop
      old_lvs = dev.children
4125 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
4126 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
4127 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
4128 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4129 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4130 a9e0c397 Iustin Pop
      # are talking about the secondary node
4131 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
4132 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
4133 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4134 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4135 a9e0c397 Iustin Pop
                                   " node '%s'" %
4136 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
4137 a9e0c397 Iustin Pop
4138 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
4139 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
4140 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
4141 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
4142 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
4143 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
4144 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
4145 cff90b79 Iustin Pop
      #dev.children = []
4146 cff90b79 Iustin Pop
      #cfg.Update(instance)
4147 a9e0c397 Iustin Pop
4148 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
4149 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
4150 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
4151 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
4152 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
4153 cff90b79 Iustin Pop
4154 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
4155 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
4156 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
4157 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
4158 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
4159 cff90b79 Iustin Pop
      rlist = []
4160 cff90b79 Iustin Pop
      for to_ren in old_lvs:
4161 72737a7f Iustin Pop
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
4162 cff90b79 Iustin Pop
        if find_res is not None: # device exists
4163 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
4164 cff90b79 Iustin Pop
4165 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
4166 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4167 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
4168 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
4169 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
4170 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
4171 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4172 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
4173 cff90b79 Iustin Pop
4174 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
4175 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
4176 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
4177 a9e0c397 Iustin Pop
4178 cff90b79 Iustin Pop
      for disk in old_lvs:
4179 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
4180 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
4181 a9e0c397 Iustin Pop
4182 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
4183 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
4184 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
4185 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
4186 72737a7f Iustin Pop
          if not self.rpc.call_blockdev_remove(tgt_node, new_lv):
4187 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
4188 cff90b79 Iustin Pop
                    " logical volumes")
4189 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
4190 a9e0c397 Iustin Pop
4191 a9e0c397 Iustin Pop
      dev.children = new_lvs
4192 a9e0c397 Iustin Pop
      cfg.Update(instance)
4193 a9e0c397 Iustin Pop
4194 cff90b79 Iustin Pop
    # Step: wait for sync
4195 a9e0c397 Iustin Pop
4196 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4197 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4198 a9e0c397 Iustin Pop
    # return value
4199 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4200 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4201 a9e0c397 Iustin Pop
4202 a9e0c397 Iustin Pop
    # so check manually all the devices
4203 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4204 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
4205 72737a7f Iustin Pop
      is_degr = self.rpc.call_blockdev_find(instance.primary_node, dev)[5]
4206 a9e0c397 Iustin Pop
      if is_degr:
4207 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4208 a9e0c397 Iustin Pop
4209 cff90b79 Iustin Pop
    # Step: remove old storage
4210 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4211 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4212 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
4213 a9e0c397 Iustin Pop
      for lv in old_lvs:
4214 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
4215 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_remove(tgt_node, lv):
4216 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
4217 a9e0c397 Iustin Pop
          continue
4218 a9e0c397 Iustin Pop
4219 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
4220 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
4221 a9e0c397 Iustin Pop

4222 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4223 a9e0c397 Iustin Pop
      - for all disks of the instance:
4224 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
4225 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
4226 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
4227 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
4228 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
4229 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
4230 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
4231 a9e0c397 Iustin Pop
          not network enabled
4232 a9e0c397 Iustin Pop
      - wait for sync across all devices
4233 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
4234 a9e0c397 Iustin Pop

4235 a9e0c397 Iustin Pop
    Failures are not very well handled.
4236 0834c866 Iustin Pop

4237 a9e0c397 Iustin Pop
    """
4238 0834c866 Iustin Pop
    steps_total = 6
4239 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4240 a9e0c397 Iustin Pop
    instance = self.instance
4241 a9e0c397 Iustin Pop
    iv_names = {}
4242 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4243 a9e0c397 Iustin Pop
    # start of work
4244 a9e0c397 Iustin Pop
    cfg = self.cfg
4245 a9e0c397 Iustin Pop
    old_node = self.tgt_node
4246 a9e0c397 Iustin Pop
    new_node = self.new_node
4247 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
4248 0834c866 Iustin Pop
4249 0834c866 Iustin Pop
    # Step: check device activation
4250 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4251 0834c866 Iustin Pop
    info("checking volume groups")
4252 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
4253 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
4254 0834c866 Iustin Pop
    if not results:
4255 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4256 0834c866 Iustin Pop
    for node in pri_node, new_node:
4257 0834c866 Iustin Pop
      res = results.get(node, False)
4258 0834c866 Iustin Pop
      if not res or my_vg not in res:
4259 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4260 0834c866 Iustin Pop
                                 (my_vg, node))
4261 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4262 d418ebfb Iustin Pop
      if idx not in self.op.disks:
4263 0834c866 Iustin Pop
        continue
4264 d418ebfb Iustin Pop
      info("checking disk/%d on %s" % (idx, pri_node))
4265 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4266 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4267 d418ebfb Iustin Pop
        raise errors.OpExecError("Can't find disk/%d on node %s" %
4268 d418ebfb Iustin Pop
                                 (idx, pri_node))
4269 0834c866 Iustin Pop
4270 0834c866 Iustin Pop
    # Step: check other node consistency
4271 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4272 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4273 d418ebfb Iustin Pop
      if idx not in self.op.disks:
4274 0834c866 Iustin Pop
        continue
4275 d418ebfb Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, pri_node))
4276 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4277 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4278 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4279 0834c866 Iustin Pop
                                 pri_node)
4280 0834c866 Iustin Pop
4281 0834c866 Iustin Pop
    # Step: create new storage
4282 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4283 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4284 a9e0c397 Iustin Pop
      size = dev.size
4285 d418ebfb Iustin Pop
      info("adding new local storage on %s for disk/%d" %
4286 d418ebfb Iustin Pop
           (new_node, idx))
4287 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4288 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4289 a9e0c397 Iustin Pop
      # are talking about the secondary node
4290 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4291 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
4292 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4293 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4294 a9e0c397 Iustin Pop
                                   " node '%s'" %
4295 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4296 a9e0c397 Iustin Pop
4297 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
4298 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
4299 a1578d63 Iustin Pop
    # error and the success paths
4300 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4301 a1578d63 Iustin Pop
                                   instance.name)
4302 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
4303 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4304 d418ebfb Iustin Pop
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
4305 0834c866 Iustin Pop
      size = dev.size
4306 d418ebfb Iustin Pop
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
4307 a9e0c397 Iustin Pop
      # create new devices on new_node
4308 ffa1c0dc Iustin Pop
      if pri_node == dev.logical_id[0]:
4309 ffa1c0dc Iustin Pop
        new_logical_id = (pri_node, new_node,
4310 f9518d38 Iustin Pop
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4311 f9518d38 Iustin Pop
                          dev.logical_id[5])
4312 ffa1c0dc Iustin Pop
      else:
4313 ffa1c0dc Iustin Pop
        new_logical_id = (new_node, pri_node,
4314 f9518d38 Iustin Pop
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4315 f9518d38 Iustin Pop
                          dev.logical_id[5])
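
# A minimal standalone sketch of the logical_id rewrite above, assuming the
# drbd8 layout implied by the indices used: (node_a, node_b, port, minor_a,
# minor_b, secret); only the old secondary and its minor are replaced.
old_id = ("node1", "node2", 11000, 0, 1, "s3cr3t")   # hypothetical values
sketch_pri, sketch_new_node, sketch_new_minor = "node1", "node3", 4

if sketch_pri == old_id[0]:
  sketch_new_id = (sketch_pri, sketch_new_node,
                   old_id[2], old_id[3], sketch_new_minor, old_id[5])
else:
  sketch_new_id = (sketch_new_node, sketch_pri,
                   old_id[2], sketch_new_minor, old_id[4], old_id[5])
# sketch_new_id == ('node1', 'node3', 11000, 0, 4, 's3cr3t')
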
4316 d418ebfb Iustin Pop
      iv_names[idx] = (dev, dev.children, new_logical_id)
4317 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4318 a1578d63 Iustin Pop
                    new_logical_id)
4319 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4320 ffa1c0dc Iustin Pop
                              logical_id=new_logical_id,
4321 a9e0c397 Iustin Pop
                              children=dev.children)
4322 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(self, new_node, instance,
4323 3f78eef2 Iustin Pop
                                        new_drbd, False,
4324 b9bddb6b Iustin Pop
                                        _GetInstanceInfoText(instance)):
4325 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4326 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4327 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4328 a9e0c397 Iustin Pop
4329 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4330 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4331 d418ebfb Iustin Pop
      info("shutting down drbd for disk/%d on old node" % idx)
4332 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4333 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_shutdown(old_node, dev):
4334 d418ebfb Iustin Pop
        warning("Failed to shutdown drbd for disk/%d on old node" % idx,
4335 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4336 a9e0c397 Iustin Pop
4337 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4338 642445d9 Iustin Pop
    done = 0
4339 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4340 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4341 f9518d38 Iustin Pop
      # set the network part of the physical (unique in bdev terms) id
4342 f9518d38 Iustin Pop
      # to None, meaning detach from network
4343 f9518d38 Iustin Pop
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
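
# A minimal standalone sketch of the tuple surgery above, with a hypothetical
# physical_id; the first four slots (the network part) are blanked and the
# rest is kept untouched.
sketch_phys_id = ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "s3cr3t")
sketch_detached = (None, None, None, None) + sketch_phys_id[4:]
# sketch_detached == (None, None, None, None, 0, 's3cr3t')
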
4344 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4345 642445d9 Iustin Pop
      # standalone state
4346 72737a7f Iustin Pop
      if self.rpc.call_blockdev_find(pri_node, dev):
4347 642445d9 Iustin Pop
        done += 1
4348 642445d9 Iustin Pop
      else:
4349 d418ebfb Iustin Pop
        warning("Failed to detach drbd disk/%d from network, unusual case" %
4350 d418ebfb Iustin Pop
                idx)
4351 642445d9 Iustin Pop
4352 642445d9 Iustin Pop
    if not done:
4353 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4354 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
4355 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4356 642445d9 Iustin Pop
4357 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4358 642445d9 Iustin Pop
    # the instance to point to the new secondary
4359 642445d9 Iustin Pop
    info("updating instance configuration")
4360 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
4361 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
4362 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4363 642445d9 Iustin Pop
    cfg.Update(instance)
4364 a1578d63 Iustin Pop
    # we can remove now the temp minors as now the new values are
4365 a1578d63 Iustin Pop
    # written to the config file (and therefore stable)
4366 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance.name)
4367 a9e0c397 Iustin Pop
4368 642445d9 Iustin Pop
    # and now perform the drbd attach
4369 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4370 642445d9 Iustin Pop
    failures = []
4371 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4372 d418ebfb Iustin Pop
      info("attaching primary drbd for disk/%d to new secondary node" % idx)
4373 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4374 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4375 642445d9 Iustin Pop
      # is correct
4376 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4377 ffa1c0dc Iustin Pop
      logging.debug("Disk to attach: %s", dev)
4378 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4379 d418ebfb Iustin Pop
        warning("can't attach drbd disk/%d to new secondary!" % idx,
4380 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4381 a9e0c397 Iustin Pop
4382 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4383 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4384 a9e0c397 Iustin Pop
    # return value
4385 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4386 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4387 a9e0c397 Iustin Pop
4388 a9e0c397 Iustin Pop
    # so check manually all the devices
4389 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
4390 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4391 72737a7f Iustin Pop
      is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5]
4392 a9e0c397 Iustin Pop
      if is_degr:
4393 d418ebfb Iustin Pop
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
4394 a9e0c397 Iustin Pop
4395 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4396 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
4397 d418ebfb Iustin Pop
      info("remove logical volumes for disk/%d" % idx)
4398 a9e0c397 Iustin Pop
      for lv in old_lvs:
4399 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4400 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_remove(old_node, lv):
4401 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4402 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4403 a9e0c397 Iustin Pop
4404 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
4405 a9e0c397 Iustin Pop
    """Execute disk replacement.
4406 a9e0c397 Iustin Pop

4407 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
4408 a9e0c397 Iustin Pop

4409 a9e0c397 Iustin Pop
    """
4410 a9e0c397 Iustin Pop
    instance = self.instance
4411 22985314 Guido Trotter
4412 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
4413 22985314 Guido Trotter
    if instance.status == "down":
4414 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, True)
4415 22985314 Guido Trotter
4416 abdf0113 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
4417 a9e0c397 Iustin Pop
      if self.op.remote_node is None:
4418 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
4419 a9e0c397 Iustin Pop
      else:
4420 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
4421 a9e0c397 Iustin Pop
    else:
4422 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
4423 22985314 Guido Trotter
4424 22985314 Guido Trotter
    ret = fn(feedback_fn)
4425 22985314 Guido Trotter
4426 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
4427 22985314 Guido Trotter
    if instance.status == "down":
4428 b9bddb6b Iustin Pop
      _SafeShutdownInstanceDisks(self, instance)
4429 22985314 Guido Trotter
4430 22985314 Guido Trotter
    return ret
4431 a9e0c397 Iustin Pop
4432 a8083063 Iustin Pop
4433 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
4434 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
4435 8729e0d7 Iustin Pop

4436 8729e0d7 Iustin Pop
  """
4437 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
4438 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4439 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
4440 31e63dbf Guido Trotter
  REQ_BGL = False
4441 31e63dbf Guido Trotter
4442 31e63dbf Guido Trotter
  def ExpandNames(self):
4443 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
4444 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4445 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4446 31e63dbf Guido Trotter
4447 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
4448 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
4449 31e63dbf Guido Trotter
      self._LockInstancesNodes()
4450 8729e0d7 Iustin Pop
4451 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
4452 8729e0d7 Iustin Pop
    """Build hooks env.
4453 8729e0d7 Iustin Pop

4454 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
4455 8729e0d7 Iustin Pop

4456 8729e0d7 Iustin Pop
    """
4457 8729e0d7 Iustin Pop
    env = {
4458 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
4459 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
4460 8729e0d7 Iustin Pop
      }
4461 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4462 8729e0d7 Iustin Pop
    nl = [
4463 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
4464 8729e0d7 Iustin Pop
      self.instance.primary_node,
4465 8729e0d7 Iustin Pop
      ]
4466 8729e0d7 Iustin Pop
    return env, nl, nl
4467 8729e0d7 Iustin Pop
4468 8729e0d7 Iustin Pop
  def CheckPrereq(self):
4469 8729e0d7 Iustin Pop
    """Check prerequisites.
4470 8729e0d7 Iustin Pop

4471 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
4472 8729e0d7 Iustin Pop

4473 8729e0d7 Iustin Pop
    """
4474 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4475 31e63dbf Guido Trotter
    assert instance is not None, \
4476 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4477 31e63dbf Guido Trotter
4478 8729e0d7 Iustin Pop
    self.instance = instance
4479 8729e0d7 Iustin Pop
4480 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
4481 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
4482 8729e0d7 Iustin Pop
                                 " growing.")
4483 8729e0d7 Iustin Pop
4484 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
4485 8729e0d7 Iustin Pop
4486 8729e0d7 Iustin Pop
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
4487 72737a7f Iustin Pop
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4488 72737a7f Iustin Pop
                                       instance.hypervisor)
4489 8729e0d7 Iustin Pop
    for node in nodenames:
4490 8729e0d7 Iustin Pop
      info = nodeinfo.get(node, None)
4491 8729e0d7 Iustin Pop
      if not info:
4492 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
4493 8729e0d7 Iustin Pop
                                   " from node '%s'" % node)
4494 8729e0d7 Iustin Pop
      vg_free = info.get('vg_free', None)
4495 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
4496 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
4497 8729e0d7 Iustin Pop
                                   " node %s" % node)
4498 8729e0d7 Iustin Pop
      if self.op.amount > info['vg_free']:
4499 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
4500 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
4501 8729e0d7 Iustin Pop
                                   (node, info['vg_free'], self.op.amount))
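
# A minimal standalone sketch of the free-space check above, over a
# hypothetical node -> info mapping; every node backing the disk must have at
# least `amount` MiB free in the volume group, otherwise the grow is refused.
def check_vg_free_sketch(nodeinfo, amount):
  """Raise ValueError if any node lacks `amount` MiB of free VG space."""
  for node, info in nodeinfo.items():
    vg_free = info.get("vg_free") if info else None
    if not isinstance(vg_free, int):
      raise ValueError("can't compute free disk space on node %s" % node)
    if amount > vg_free:
      raise ValueError("not enough space on %s: %d MiB free, %d MiB needed" %
                       (node, vg_free, amount))

# e.g. check_vg_free_sketch({"node1": {"vg_free": 20480},
#                            "node2": {"vg_free": 1024}}, 2048)
# raises ValueError for node2
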
4502 8729e0d7 Iustin Pop
4503 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
4504 8729e0d7 Iustin Pop
    """Execute disk grow.
4505 8729e0d7 Iustin Pop

4506 8729e0d7 Iustin Pop
    """
4507 8729e0d7 Iustin Pop
    instance = self.instance
4508 ad24e046 Iustin Pop
    disk = self.disk
4509 8729e0d7 Iustin Pop
    for node in (instance.secondary_nodes + (instance.primary_node,)):
4510 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
4511 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
4512 72737a7f Iustin Pop
      if (not result or not isinstance(result, (list, tuple)) or
4513 72737a7f Iustin Pop
          len(result) != 2):
4514 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s" % node)
4515 8729e0d7 Iustin Pop
      elif not result[0]:
4516 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s: %s" %
4517 8729e0d7 Iustin Pop
                                 (node, result[1]))
4518 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
4519 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
4520 6605411d Iustin Pop
    if self.op.wait_for_sync:
4521 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
4522 6605411d Iustin Pop
      if disk_abort:
4523 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
4524 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
4525 8729e0d7 Iustin Pop
4526 8729e0d7 Iustin Pop
4527 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
4528 a8083063 Iustin Pop
  """Query runtime instance data.
4529 a8083063 Iustin Pop

4530 a8083063 Iustin Pop
  """
4531 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
4532 a987fa48 Guido Trotter
  REQ_BGL = False
4533 ae5849b5 Michael Hanselmann
4534 a987fa48 Guido Trotter
  def ExpandNames(self):
4535 a987fa48 Guido Trotter
    self.needed_locks = {}
4536 a987fa48 Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
4537 a987fa48 Guido Trotter
4538 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
4539 a987fa48 Guido Trotter
      raise errors.OpPrereqError("Invalid argument type 'instances'")
4540 a987fa48 Guido Trotter
4541 a987fa48 Guido Trotter
    if self.op.instances:
4542 a987fa48 Guido Trotter
      self.wanted_names = []
4543 a987fa48 Guido Trotter
      for name in self.op.instances:
4544 a987fa48 Guido Trotter
        full_name = self.cfg.ExpandInstanceName(name)
4545 a987fa48 Guido Trotter
        if full_name is None:
4546 a987fa48 Guido Trotter
          raise errors.OpPrereqError("Instance '%s' not known" %
4547 a987fa48 Guido Trotter
                                     self.op.instance_name)
4548 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
4549 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
4550 a987fa48 Guido Trotter
    else:
4551 a987fa48 Guido Trotter
      self.wanted_names = None
4552 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
4553 a987fa48 Guido Trotter
4554 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4555 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4556 a987fa48 Guido Trotter
4557 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
4558 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
4559 a987fa48 Guido Trotter
      self._LockInstancesNodes()
4560 a8083063 Iustin Pop
4561 a8083063 Iustin Pop
  def CheckPrereq(self):
4562 a8083063 Iustin Pop
    """Check prerequisites.
4563 a8083063 Iustin Pop

4564 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
4565 a8083063 Iustin Pop

4566 a8083063 Iustin Pop
    """
4567 a987fa48 Guido Trotter
    if self.wanted_names is None:
4568 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4569 a8083063 Iustin Pop
4570 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
4571 a987fa48 Guido Trotter
                             in self.wanted_names]
4572 a987fa48 Guido Trotter
    return
4573 a8083063 Iustin Pop
4574 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
4575 a8083063 Iustin Pop
    """Compute block device status.
4576 a8083063 Iustin Pop

4577 a8083063 Iustin Pop
    """
4578 57821cac Iustin Pop
    static = self.op.static
4579 57821cac Iustin Pop
    if not static:
4580 57821cac Iustin Pop
      self.cfg.SetDiskID(dev, instance.primary_node)
4581 57821cac Iustin Pop
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
4582 57821cac Iustin Pop
    else:
4583 57821cac Iustin Pop
      dev_pstatus = None
4584 57821cac Iustin Pop
4585 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
4586 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
4587 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
4588 a8083063 Iustin Pop
        snode = dev.logical_id[1]
4589 a8083063 Iustin Pop
      else:
4590 a8083063 Iustin Pop
        snode = dev.logical_id[0]
4591 a8083063 Iustin Pop
4592 57821cac Iustin Pop
    if snode and not static:
4593 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
4594 72737a7f Iustin Pop
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
4595 a8083063 Iustin Pop
    else:
4596 a8083063 Iustin Pop
      dev_sstatus = None
4597 a8083063 Iustin Pop
4598 a8083063 Iustin Pop
    if dev.children:
4599 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
4600 a8083063 Iustin Pop
                      for child in dev.children]
4601 a8083063 Iustin Pop
    else:
4602 a8083063 Iustin Pop
      dev_children = []
4603 a8083063 Iustin Pop
4604 a8083063 Iustin Pop
    data = {
4605 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
4606 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
4607 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
4608 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
4609 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
4610 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
4611 a8083063 Iustin Pop
      "children": dev_children,
4612 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
4613 a8083063 Iustin Pop
      }
4614 a8083063 Iustin Pop
4615 a8083063 Iustin Pop
    return data
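
# A minimal standalone sketch of the recursive walk above, over a hypothetical
# nested disk description in plain dicts; the real method works on
# objects.Disk instances and fills in the status via blockdev RPC calls.
def disk_tree_sketch(dev):
  """Return a nested summary dict mirroring the disk/children hierarchy."""
  return {
    "iv_name": dev["iv_name"],
    "children": [disk_tree_sketch(c) for c in dev.get("children", [])],
  }

# disk_tree_sketch({"iv_name": "disk/0",
#                   "children": [{"iv_name": "data"}, {"iv_name": "meta"}]})
# -> nested dict with empty children lists at the leaves
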
4616 a8083063 Iustin Pop
4617 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4618 a8083063 Iustin Pop
    """Gather and return data"""
4619 a8083063 Iustin Pop
    result = {}
4620 338e51e8 Iustin Pop
4621 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
4622 338e51e8 Iustin Pop
4623 a8083063 Iustin Pop
    for instance in self.wanted_instances:
4624 57821cac Iustin Pop
      if not self.op.static:
4625 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
4626 57821cac Iustin Pop
                                                  instance.name,
4627 57821cac Iustin Pop
                                                  instance.hypervisor)
4628 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
4629 57821cac Iustin Pop
          remote_state = "up"
4630 57821cac Iustin Pop
        else:
4631 57821cac Iustin Pop
          remote_state = "down"
4632 a8083063 Iustin Pop
      else:
4633 57821cac Iustin Pop
        remote_state = None
4634 a8083063 Iustin Pop
      if instance.status == "down":
4635 a8083063 Iustin Pop
        config_state = "down"
4636 a8083063 Iustin Pop
      else:
4637 a8083063 Iustin Pop
        config_state = "up"
4638 a8083063 Iustin Pop
4639 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
4640 a8083063 Iustin Pop
               for device in instance.disks]
4641 a8083063 Iustin Pop
4642 a8083063 Iustin Pop
      idict = {
4643 a8083063 Iustin Pop
        "name": instance.name,
4644 a8083063 Iustin Pop
        "config_state": config_state,
4645 a8083063 Iustin Pop
        "run_state": remote_state,
4646 a8083063 Iustin Pop
        "pnode": instance.primary_node,
4647 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
4648 a8083063 Iustin Pop
        "os": instance.os,
4649 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4650 a8083063 Iustin Pop
        "disks": disks,
4651 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
4652 24838135 Iustin Pop
        "network_port": instance.network_port,
4653 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
4654 338e51e8 Iustin Pop
        "hv_actual": cluster.FillHV(instance),
4655 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
4656 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
4657 a8083063 Iustin Pop
        }
4658 a8083063 Iustin Pop
4659 a8083063 Iustin Pop
      result[instance.name] = idict
4660 a8083063 Iustin Pop
4661 a8083063 Iustin Pop
    return result
4662 a8083063 Iustin Pop
4663 a8083063 Iustin Pop
4664 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4665 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4666 a8083063 Iustin Pop

4667 a8083063 Iustin Pop
  """
4668 a8083063 Iustin Pop
  HPATH = "instance-modify"
4669 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4670 74409b12 Iustin Pop
  _OP_REQP = ["instance_name", "hvparams"]
4671 1a5c7281 Guido Trotter
  REQ_BGL = False
4672 1a5c7281 Guido Trotter
4673 1a5c7281 Guido Trotter
  def ExpandNames(self):
4674 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
4675 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
4676 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4677 74409b12 Iustin Pop
4678 74409b12 Iustin Pop
4679 74409b12 Iustin Pop
  def DeclareLocks(self, level):
4680 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
4681 74409b12 Iustin Pop
      self._LockInstancesNodes()
4682 a8083063 Iustin Pop
4683 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4684 a8083063 Iustin Pop
    """Build hooks env.
4685 a8083063 Iustin Pop

4686 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4687 a8083063 Iustin Pop

4688 a8083063 Iustin Pop
    """
4689 396e1b78 Michael Hanselmann
    args = dict()
4690 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
4691 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
4692 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
4693 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
4694 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4695 396e1b78 Michael Hanselmann
      if self.do_ip:
4696 396e1b78 Michael Hanselmann
        ip = self.ip
4697 396e1b78 Michael Hanselmann
      else:
4698 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4699 396e1b78 Michael Hanselmann
      if self.bridge:
4700 396e1b78 Michael Hanselmann
        bridge = self.bridge
4701 396e1b78 Michael Hanselmann
      else:
4702 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4703 ef756965 Iustin Pop
      if self.mac:
4704 ef756965 Iustin Pop
        mac = self.mac
4705 ef756965 Iustin Pop
      else:
4706 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4707 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4708 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
4709 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(),
4710 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4711 a8083063 Iustin Pop
    return env, nl, nl
4712 a8083063 Iustin Pop
4713 a8083063 Iustin Pop
  def CheckPrereq(self):
4714 a8083063 Iustin Pop
    """Check prerequisites.
4715 a8083063 Iustin Pop

4716 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4717 a8083063 Iustin Pop

4718 a8083063 Iustin Pop
    """
4719 1a5c7281 Guido Trotter
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
4720 1a5c7281 Guido Trotter
    # a separate CheckArguments function, if we implement one, so the operation
4721 1a5c7281 Guido Trotter
    # can be aborted without waiting for any lock, should it have an error...
4722 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4723 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4724 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4725 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4726 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4727 4300c4b6 Guido Trotter
    self.force = getattr(self.op, "force", None)
4728 338e51e8 Iustin Pop
    all_parms = [self.ip, self.bridge, self.mac]
4729 338e51e8 Iustin Pop
    if (all_parms.count(None) == len(all_parms) and
4730 338e51e8 Iustin Pop
        not self.op.hvparams and
4731 338e51e8 Iustin Pop
        not self.op.beparams):
4732 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4733 338e51e8 Iustin Pop
    for item in (constants.BE_MEMORY, constants.BE_VCPUS):
4734 338e51e8 Iustin Pop
      val = self.op.beparams.get(item, None)
4735 338e51e8 Iustin Pop
      if val is not None:
4736 338e51e8 Iustin Pop
        try:
4737 338e51e8 Iustin Pop
          val = int(val)
4738 338e51e8 Iustin Pop
        except ValueError, err:
4739 338e51e8 Iustin Pop
          raise errors.OpPrereqError("Invalid %s size: %s" % (item, str(err)))
4740 338e51e8 Iustin Pop
        self.op.beparams[item] = val
4741 a8083063 Iustin Pop
    if self.ip is not None:
4742 a8083063 Iustin Pop
      self.do_ip = True
4743 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4744 a8083063 Iustin Pop
        self.ip = None
4745 a8083063 Iustin Pop
      else:
4746 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4747 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4748 a8083063 Iustin Pop
    else:
4749 a8083063 Iustin Pop
      self.do_ip = False
4750 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4751 1862d460 Alexander Schreiber
    if self.mac is not None:
4752 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4753 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4754 1862d460 Alexander Schreiber
                                   self.mac)
4755 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4756 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4757 a8083063 Iustin Pop
4758 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
4759 31a853d2 Iustin Pop
4760 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4761 1a5c7281 Guido Trotter
    assert self.instance is not None, \
4762 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4763 74409b12 Iustin Pop
    pnode = self.instance.primary_node
4764 74409b12 Iustin Pop
    nodelist = [pnode]
4765 74409b12 Iustin Pop
    nodelist.extend(instance.secondary_nodes)
4766 74409b12 Iustin Pop
4767 338e51e8 Iustin Pop
    # hvparams processing
4768 74409b12 Iustin Pop
    if self.op.hvparams:
4769 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
4770 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
4771 74409b12 Iustin Pop
        if val is None:
4772 74409b12 Iustin Pop
          try:
4773 74409b12 Iustin Pop
            del i_hvdict[key]
4774 74409b12 Iustin Pop
          except KeyError:
4775 74409b12 Iustin Pop
            pass
4776 74409b12 Iustin Pop
        else:
4777 74409b12 Iustin Pop
          i_hvdict[key] = val
4778 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4779 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
4780 74409b12 Iustin Pop
                                i_hvdict)
4781 74409b12 Iustin Pop
      # local check
4782 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
4783 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
4784 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
4785 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
4786 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
4787 338e51e8 Iustin Pop
    else:
4788 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
4789 338e51e8 Iustin Pop
4790 338e51e8 Iustin Pop
    # beparams processing
4791 338e51e8 Iustin Pop
    if self.op.beparams:
4792 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
4793 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
4794 338e51e8 Iustin Pop
        if val is None:
4795 338e51e8 Iustin Pop
          try:
4796 338e51e8 Iustin Pop
            del i_bedict[key]
4797 338e51e8 Iustin Pop
          except KeyError:
4798 338e51e8 Iustin Pop
            pass
4799 338e51e8 Iustin Pop
        else:
4800 338e51e8 Iustin Pop
          i_bedict[key] = val
4801 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4802 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4803 338e51e8 Iustin Pop
                                i_bedict)
4804 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
4805 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
4806 338e51e8 Iustin Pop
    else:
4807 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
4808 74409b12 Iustin Pop
4809 cfefe007 Guido Trotter
    self.warn = []
4810 647a5d80 Iustin Pop
4811 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
4812 647a5d80 Iustin Pop
      mem_check_list = [pnode]
4813 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4814 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was already set before
4815 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
4816 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
4817 72737a7f Iustin Pop
                                                  instance.hypervisor)
4818 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
4819 72737a7f Iustin Pop
                                         instance.hypervisor)
4820 cfefe007 Guido Trotter
4821 cfefe007 Guido Trotter
      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
4822 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
4823 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
4824 cfefe007 Guido Trotter
      else:
4825 cfefe007 Guido Trotter
        if instance_info:
4826 cfefe007 Guido Trotter
          current_mem = instance_info['memory']
4827 cfefe007 Guido Trotter
        else:
4828 cfefe007 Guido Trotter
          # Assume instance not running
4829 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
4830 cfefe007 Guido Trotter
          # and we have no other way to check)
4831 cfefe007 Guido Trotter
          current_mem = 0
4832 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
4833 338e51e8 Iustin Pop
                    nodeinfo[pnode]['memory_free'])
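        # Illustrative example with made-up numbers: asking for 2048 MB when
        # the instance currently uses 512 MB and the primary node reports
        # 1024 MB free gives miss_mem = 2048 - 512 - 1024 = 512 > 0, so the
        # change is refused (unless the force flag was given, in which case
        # this whole check is skipped)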
4834 cfefe007 Guido Trotter
        if miss_mem > 0:
4835 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
4836 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
4837 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
4838 cfefe007 Guido Trotter
4839 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4840 647a5d80 Iustin Pop
        for node in instance.secondary_nodes:
4841 647a5d80 Iustin Pop
          if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
4842 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
4843 647a5d80 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nodeinfo[node]['memory_free']:
4844 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
4845 647a5d80 Iustin Pop
                             " secondary node %s" % node)
4846 5bc84f33 Alexander Schreiber
4847 a8083063 Iustin Pop
    return
4848 a8083063 Iustin Pop
4849 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4850 a8083063 Iustin Pop
    """Modifies an instance.
4851 a8083063 Iustin Pop

4852 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.

4853 a8083063 Iustin Pop
    """
4854 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
4855 cfefe007 Guido Trotter
    # feedback_fn there.
4856 cfefe007 Guido Trotter
    for warn in self.warn:
4857 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
4858 cfefe007 Guido Trotter
4859 a8083063 Iustin Pop
    result = []
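    # The return value is a list of (parameter, new value) pairs describing
    # the changes that were applied, e.g. (with made-up values):
    #   [("ip", "192.0.2.10"), ("hv/kernel_path", "/boot/vmlinuz")]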
4860 a8083063 Iustin Pop
    instance = self.instance
4861 a8083063 Iustin Pop
    if self.do_ip:
4862 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4863 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4864 a8083063 Iustin Pop
    if self.bridge:
4865 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4866 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4867 1862d460 Alexander Schreiber
    if self.mac:
4868 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4869 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4870 74409b12 Iustin Pop
    if self.op.hvparams:
4871 74409b12 Iustin Pop
      instance.hvparams = self.hv_new
4872 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
4873 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
4874 338e51e8 Iustin Pop
    if self.op.beparams:
4875 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
4876 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
4877 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
4878 a8083063 Iustin Pop
4879 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
4880 a8083063 Iustin Pop
4881 a8083063 Iustin Pop
    return result
4882 a8083063 Iustin Pop
4883 a8083063 Iustin Pop
4884 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
4885 a8083063 Iustin Pop
  """Query the exports list
4886 a8083063 Iustin Pop

4887 a8083063 Iustin Pop
  """
4888 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
4889 21a15682 Guido Trotter
  REQ_BGL = False
4890 21a15682 Guido Trotter
4891 21a15682 Guido Trotter
  def ExpandNames(self):
4892 21a15682 Guido Trotter
    self.needed_locks = {}
4893 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
4894 21a15682 Guido Trotter
    if not self.op.nodes:
4895 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4896 21a15682 Guido Trotter
    else:
4897 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
4898 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
4899 a8083063 Iustin Pop
4900 a8083063 Iustin Pop
  def CheckPrereq(self):
4901 21a15682 Guido Trotter
    """Check prerequisites.
4902 a8083063 Iustin Pop

4903 a8083063 Iustin Pop
    """
4904 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
4905 a8083063 Iustin Pop
4906 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4907 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
4908 a8083063 Iustin Pop

4909 e4376078 Iustin Pop
    @rtype: dict
4910 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
4911 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
4912 e4376078 Iustin Pop
        that node.
4913 a8083063 Iustin Pop

4914 a8083063 Iustin Pop
    """
4915 72737a7f Iustin Pop
    return self.rpc.call_export_list(self.nodes)
4916 a8083063 Iustin Pop
4917 a8083063 Iustin Pop
4918 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
4919 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
4920 a8083063 Iustin Pop

4921 a8083063 Iustin Pop
  """
4922 a8083063 Iustin Pop
  HPATH = "instance-export"
4923 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4924 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
4925 6657590e Guido Trotter
  REQ_BGL = False
4926 6657590e Guido Trotter
4927 6657590e Guido Trotter
  def ExpandNames(self):
4928 6657590e Guido Trotter
    self._ExpandAndLockInstance()
4929 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
4930 6657590e Guido Trotter
    #
4931 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
4932 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
4933 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
4934 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
4935 6657590e Guido Trotter
    #    then one to remove the old export afterwards
4936 6657590e Guido Trotter
    #  - removing the removal operation altogether
4937 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4938 6657590e Guido Trotter
4939 6657590e Guido Trotter
  def DeclareLocks(self, level):
4940 6657590e Guido Trotter
    """Last minute lock declaration."""
4941 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
4942 a8083063 Iustin Pop
4943 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4944 a8083063 Iustin Pop
    """Build hooks env.
4945 a8083063 Iustin Pop

4946 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
4947 a8083063 Iustin Pop

4948 a8083063 Iustin Pop
    """
4949 a8083063 Iustin Pop
    env = {
4950 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
4951 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
4952 a8083063 Iustin Pop
      }
4953 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4954 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
4955 a8083063 Iustin Pop
          self.op.target_node]
4956 a8083063 Iustin Pop
    return env, nl, nl
4957 a8083063 Iustin Pop
4958 a8083063 Iustin Pop
  def CheckPrereq(self):
4959 a8083063 Iustin Pop
    """Check prerequisites.
4960 a8083063 Iustin Pop

4961 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
4962 a8083063 Iustin Pop

4963 a8083063 Iustin Pop
    """
4964 6657590e Guido Trotter
    instance_name = self.op.instance_name
4965 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
4966 6657590e Guido Trotter
    assert self.instance is not None, \
4967 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
4968 a8083063 Iustin Pop
4969 6657590e Guido Trotter
    self.dst_node = self.cfg.GetNodeInfo(
4970 6657590e Guido Trotter
      self.cfg.ExpandNodeName(self.op.target_node))
4971 a8083063 Iustin Pop
4972 6657590e Guido Trotter
    assert self.dst_node is not None, \
4973 6657590e Guido Trotter
          "Cannot retrieve locked node %s" % self.op.target_node
4974 a8083063 Iustin Pop
4975 b6023d6c Manuel Franceschini
    # instance disk type verification
4976 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
4977 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
4978 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
4979 b6023d6c Manuel Franceschini
                                   " file-based disks")
4980 b6023d6c Manuel Franceschini
4981 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4982 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
4983 a8083063 Iustin Pop

4984 a8083063 Iustin Pop
    """
4985 a8083063 Iustin Pop
    instance = self.instance
4986 a8083063 Iustin Pop
    dst_node = self.dst_node
4987 a8083063 Iustin Pop
    src_node = instance.primary_node
4988 a8083063 Iustin Pop
    if self.op.shutdown:
4989 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
4990 72737a7f Iustin Pop
      if not self.rpc.call_instance_shutdown(src_node, instance):
4991 38206f3c Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
4992 38206f3c Iustin Pop
                                 (instance.name, src_node))
4993 a8083063 Iustin Pop
4994 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
4995 a8083063 Iustin Pop
4996 a8083063 Iustin Pop
    snap_disks = []
4997 a8083063 Iustin Pop
4998 a8083063 Iustin Pop
    try:
4999 a8083063 Iustin Pop
      for disk in instance.disks:
5000 19d7f90a Guido Trotter
        # new_dev_name will be a snapshot of an LVM leaf of the disk we passed
5001 19d7f90a Guido Trotter
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
5002 a8083063 Iustin Pop
5003 19d7f90a Guido Trotter
        if not new_dev_name:
5004 19d7f90a Guido Trotter
          self.LogWarning("Could not snapshot block device %s on node %s",
5005 9a4f63d1 Iustin Pop
                          disk.logical_id[1], src_node)
5006 19d7f90a Guido Trotter
          snap_disks.append(False)
5007 19d7f90a Guido Trotter
        else:
5008 19d7f90a Guido Trotter
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
5009 19d7f90a Guido Trotter
                                 logical_id=(vgname, new_dev_name),
5010 19d7f90a Guido Trotter
                                 physical_id=(vgname, new_dev_name),
5011 19d7f90a Guido Trotter
                                 iv_name=disk.iv_name)
5012 19d7f90a Guido Trotter
          snap_disks.append(new_dev)
5013 a8083063 Iustin Pop
5014 a8083063 Iustin Pop
    finally:
5015 fb300fb7 Guido Trotter
      if self.op.shutdown and instance.status == "up":
5016 72737a7f Iustin Pop
        if not self.rpc.call_instance_start(src_node, instance, None):
5017 b9bddb6b Iustin Pop
          _ShutdownInstanceDisks(self, instance)
5018 fb300fb7 Guido Trotter
          raise errors.OpExecError("Could not start instance")
5019 a8083063 Iustin Pop
5020 a8083063 Iustin Pop
    # TODO: check for size
5021 a8083063 Iustin Pop
5022 62c9ec92 Iustin Pop
    cluster_name = self.cfg.GetClusterName()
5023 74c47259 Iustin Pop
    for idx, dev in enumerate(snap_disks):
5024 19d7f90a Guido Trotter
      if dev:
5025 19d7f90a Guido Trotter
        if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
5026 74c47259 Iustin Pop
                                             instance, cluster_name, idx):
5027 19d7f90a Guido Trotter
          self.LogWarning("Could not export block device %s from node %s to"
5028 19d7f90a Guido Trotter
                          " node %s", dev.logical_id[1], src_node,
5029 19d7f90a Guido Trotter
                          dst_node.name)
5030 19d7f90a Guido Trotter
        if not self.rpc.call_blockdev_remove(src_node, dev):
5031 19d7f90a Guido Trotter
          self.LogWarning("Could not remove snapshot block device %s from node"
5032 19d7f90a Guido Trotter
                          " %s", dev.logical_id[1], src_node)
5033 a8083063 Iustin Pop
5034 72737a7f Iustin Pop
    if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks):
5035 19d7f90a Guido Trotter
      self.LogWarning("Could not finalize export for instance %s on node %s",
5036 19d7f90a Guido Trotter
                      instance.name, dst_node.name)
5037 a8083063 Iustin Pop
5038 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
5039 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
5040 a8083063 Iustin Pop
5041 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
5042 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
5043 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
5044 a8083063 Iustin Pop
    if nodelist:
5045 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
5046 a8083063 Iustin Pop
      for node in exportlist:
5047 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
5048 72737a7f Iustin Pop
          if not self.rpc.call_export_remove(node, instance.name):
5049 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
5050 19d7f90a Guido Trotter
                            " on node %s", instance.name, node)
5051 5c947f38 Iustin Pop
5052 5c947f38 Iustin Pop
5053 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
5054 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
5055 9ac99fda Guido Trotter

5056 9ac99fda Guido Trotter
  """
5057 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
5058 3656b3af Guido Trotter
  REQ_BGL = False
5059 3656b3af Guido Trotter
5060 3656b3af Guido Trotter
  def ExpandNames(self):
5061 3656b3af Guido Trotter
    self.needed_locks = {}
5062 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
5063 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
5064 3656b3af Guido Trotter
    # we can remove exports also for a removed instance)
5065 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5066 9ac99fda Guido Trotter
5067 9ac99fda Guido Trotter
  def CheckPrereq(self):
5068 9ac99fda Guido Trotter
    """Check prerequisites.
5069 9ac99fda Guido Trotter
    """
5070 9ac99fda Guido Trotter
    pass
5071 9ac99fda Guido Trotter
5072 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
5073 9ac99fda Guido Trotter
    """Remove any export.
5074 9ac99fda Guido Trotter

5075 9ac99fda Guido Trotter
    """
5076 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
5077 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
5078 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
5079 9ac99fda Guido Trotter
    fqdn_warn = False
5080 9ac99fda Guido Trotter
    if not instance_name:
5081 9ac99fda Guido Trotter
      fqdn_warn = True
5082 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
5083 9ac99fda Guido Trotter
5084 72737a7f Iustin Pop
    exportlist = self.rpc.call_export_list(self.acquired_locks[
5085 72737a7f Iustin Pop
      locking.LEVEL_NODE])
5086 9ac99fda Guido Trotter
    found = False
5087 9ac99fda Guido Trotter
    for node in exportlist:
5088 9ac99fda Guido Trotter
      if instance_name in exportlist[node]:
5089 9ac99fda Guido Trotter
        found = True
5090 72737a7f Iustin Pop
        if not self.rpc.call_export_remove(node, instance_name):
5091 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
5092 9a4f63d1 Iustin Pop
                        " on node %s", instance_name, node)
5093 9ac99fda Guido Trotter
5094 9ac99fda Guido Trotter
    if fqdn_warn and not found:
5095 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
5096 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
5097 9ac99fda Guido Trotter
                  " Domain Name.")
5098 9ac99fda Guido Trotter
5099 9ac99fda Guido Trotter
5100 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
5101 5c947f38 Iustin Pop
  """Generic tags LU.
5102 5c947f38 Iustin Pop

5103 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
5104 5c947f38 Iustin Pop

5105 5c947f38 Iustin Pop
  """
5106 5c947f38 Iustin Pop
5107 8646adce Guido Trotter
  def ExpandNames(self):
5108 8646adce Guido Trotter
    self.needed_locks = {}
5109 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
5110 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
5111 5c947f38 Iustin Pop
      if name is None:
5112 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
5113 3ecf6786 Iustin Pop
                                   (self.op.name,))
5114 5c947f38 Iustin Pop
      self.op.name = name
5115 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = name
5116 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
5117 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
5118 5c947f38 Iustin Pop
      if name is None:
5119 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
5120 3ecf6786 Iustin Pop
                                   (self.op.name,))
5121 5c947f38 Iustin Pop
      self.op.name = name
5122 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = name
5123 8646adce Guido Trotter
5124 8646adce Guido Trotter
  def CheckPrereq(self):
5125 8646adce Guido Trotter
    """Check prerequisites.
5126 8646adce Guido Trotter

5127 8646adce Guido Trotter
    """
5128 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
5129 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
5130 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
5131 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
5132 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
5133 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
5134 5c947f38 Iustin Pop
    else:
5135 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
5136 3ecf6786 Iustin Pop
                                 str(self.op.kind))
5137 5c947f38 Iustin Pop
5138 5c947f38 Iustin Pop
5139 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
5140 5c947f38 Iustin Pop
  """Returns the tags of a given object.
5141 5c947f38 Iustin Pop

5142 5c947f38 Iustin Pop
  """
5143 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
5144 8646adce Guido Trotter
  REQ_BGL = False
5145 5c947f38 Iustin Pop
5146 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5147 5c947f38 Iustin Pop
    """Returns the tag list.
5148 5c947f38 Iustin Pop

5149 5c947f38 Iustin Pop
    """
5150 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
5151 5c947f38 Iustin Pop
5152 5c947f38 Iustin Pop
5153 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
5154 73415719 Iustin Pop
  """Searches the tags for a given pattern.
5155 73415719 Iustin Pop

5156 73415719 Iustin Pop
  """
5157 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
5158 8646adce Guido Trotter
  REQ_BGL = False
5159 8646adce Guido Trotter
5160 8646adce Guido Trotter
  def ExpandNames(self):
5161 8646adce Guido Trotter
    self.needed_locks = {}
5162 73415719 Iustin Pop
5163 73415719 Iustin Pop
  def CheckPrereq(self):
5164 73415719 Iustin Pop
    """Check prerequisites.
5165 73415719 Iustin Pop

5166 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
5167 73415719 Iustin Pop

5168 73415719 Iustin Pop
    """
5169 73415719 Iustin Pop
    try:
5170 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
5171 73415719 Iustin Pop
    except re.error, err:
5172 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
5173 73415719 Iustin Pop
                                 (self.op.pattern, err))
5174 73415719 Iustin Pop
5175 73415719 Iustin Pop
  def Exec(self, feedback_fn):
5176 73415719 Iustin Pop
    """Returns the tag list.
5177 73415719 Iustin Pop

5178 73415719 Iustin Pop
    """
5179 73415719 Iustin Pop
    cfg = self.cfg
5180 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
5181 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
5182 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
5183 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
5184 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
5185 73415719 Iustin Pop
    results = []
5186 73415719 Iustin Pop
    for path, target in tgts:
5187 73415719 Iustin Pop
      for tag in target.GetTags():
5188 73415719 Iustin Pop
        if self.re.search(tag):
5189 73415719 Iustin Pop
          results.append((path, tag))
5190 73415719 Iustin Pop
    return results
5191 73415719 Iustin Pop
5192 73415719 Iustin Pop
5193 f27302fa Iustin Pop
class LUAddTags(TagsLU):
5194 5c947f38 Iustin Pop
  """Sets a tag on a given object.
5195 5c947f38 Iustin Pop

5196 5c947f38 Iustin Pop
  """
5197 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5198 8646adce Guido Trotter
  REQ_BGL = False
5199 5c947f38 Iustin Pop
5200 5c947f38 Iustin Pop
  def CheckPrereq(self):
5201 5c947f38 Iustin Pop
    """Check prerequisites.
5202 5c947f38 Iustin Pop

5203 5c947f38 Iustin Pop
    This checks the type and length of each tag passed in.
5204 5c947f38 Iustin Pop

5205 5c947f38 Iustin Pop
    """
5206 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5207 f27302fa Iustin Pop
    for tag in self.op.tags:
5208 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5209 5c947f38 Iustin Pop
5210 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5211 5c947f38 Iustin Pop
    """Sets the tag.
5212 5c947f38 Iustin Pop

5213 5c947f38 Iustin Pop
    """
5214 5c947f38 Iustin Pop
    try:
5215 f27302fa Iustin Pop
      for tag in self.op.tags:
5216 f27302fa Iustin Pop
        self.target.AddTag(tag)
5217 5c947f38 Iustin Pop
    except errors.TagError, err:
5218 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
5219 5c947f38 Iustin Pop
    try:
5220 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5221 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5222 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5223 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5224 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5225 5c947f38 Iustin Pop
5226 5c947f38 Iustin Pop
5227 f27302fa Iustin Pop
class LUDelTags(TagsLU):
5228 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
5229 5c947f38 Iustin Pop

5230 5c947f38 Iustin Pop
  """
5231 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5232 8646adce Guido Trotter
  REQ_BGL = False
5233 5c947f38 Iustin Pop
5234 5c947f38 Iustin Pop
  def CheckPrereq(self):
5235 5c947f38 Iustin Pop
    """Check prerequisites.
5236 5c947f38 Iustin Pop

5237 5c947f38 Iustin Pop
    This checks that we have the given tag.
5238 5c947f38 Iustin Pop

5239 5c947f38 Iustin Pop
    """
5240 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5241 f27302fa Iustin Pop
    for tag in self.op.tags:
5242 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5243 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
5244 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
5245 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
5246 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
5247 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
5248 f27302fa Iustin Pop
      diff_names.sort()
5249 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
5250 f27302fa Iustin Pop
                                 (",".join(diff_names)))
5251 5c947f38 Iustin Pop
5252 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5253 5c947f38 Iustin Pop
    """Remove the tag from the object.
5254 5c947f38 Iustin Pop

5255 5c947f38 Iustin Pop
    """
5256 f27302fa Iustin Pop
    for tag in self.op.tags:
5257 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
5258 5c947f38 Iustin Pop
    try:
5259 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5260 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5261 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5262 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5263 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5264 06009e27 Iustin Pop
5265 0eed6e61 Guido Trotter
5266 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
5267 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
5268 06009e27 Iustin Pop

5269 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
5270 06009e27 Iustin Pop
  time.
5271 06009e27 Iustin Pop

5272 06009e27 Iustin Pop
  """
5273 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
5274 fbe9022f Guido Trotter
  REQ_BGL = False
5275 06009e27 Iustin Pop
5276 fbe9022f Guido Trotter
  def ExpandNames(self):
5277 fbe9022f Guido Trotter
    """Expand names and set required locks.
5278 06009e27 Iustin Pop

5279 fbe9022f Guido Trotter
    This expands the node list, if any.
5280 06009e27 Iustin Pop

5281 06009e27 Iustin Pop
    """
5282 fbe9022f Guido Trotter
    self.needed_locks = {}
5283 06009e27 Iustin Pop
    if self.op.on_nodes:
5284 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
5285 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
5286 fbe9022f Guido Trotter
      # more information.
5287 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
5288 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
5289 fbe9022f Guido Trotter
5290 fbe9022f Guido Trotter
  def CheckPrereq(self):
5291 fbe9022f Guido Trotter
    """Check prerequisites.
5292 fbe9022f Guido Trotter

5293 fbe9022f Guido Trotter
    """
5294 06009e27 Iustin Pop
5295 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
5296 06009e27 Iustin Pop
    """Do the actual sleep.
5297 06009e27 Iustin Pop

5298 06009e27 Iustin Pop
    """
5299 06009e27 Iustin Pop
    if self.op.on_master:
5300 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
5301 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
5302 06009e27 Iustin Pop
    if self.op.on_nodes:
5303 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
5304 06009e27 Iustin Pop
      if not result:
5305 06009e27 Iustin Pop
        raise errors.OpExecError("Complete failure from rpc call")
5306 06009e27 Iustin Pop
      for node, node_result in result.items():
5307 06009e27 Iustin Pop
        if not node_result:
5308 06009e27 Iustin Pop
          raise errors.OpExecError("Failure during rpc call to node %s,"
5309 06009e27 Iustin Pop
                                   " result: %s" % (node, node_result))
5310 d61df03e Iustin Pop
5311 d61df03e Iustin Pop
5312 d1c2dd75 Iustin Pop
class IAllocator(object):
5313 d1c2dd75 Iustin Pop
  """IAllocator framework.
5314 d61df03e Iustin Pop

5315 d1c2dd75 Iustin Pop
  An IAllocator instance has four sets of attributes:
5316 d6a02168 Michael Hanselmann
    - lu, which gives access to the cluster configuration and RPC calls
5317 d1c2dd75 Iustin Pop
    - input data (all members of the mode-specific _ALLO_KEYS or _RELO_KEYS
      class attribute are required)
5318 d1c2dd75 Iustin Pop
    - four buffer attributes (in_text, out_text, in_data, out_data)
      that represent the
5319 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
5320 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
5321 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
5322 d1c2dd75 Iustin Pop
      easy usage
5323 d61df03e Iustin Pop

5324 d61df03e Iustin Pop
  """
5325 29859cb7 Iustin Pop
  _ALLO_KEYS = [
5326 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
5327 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
5328 d1c2dd75 Iustin Pop
    ]
5329 29859cb7 Iustin Pop
  _RELO_KEYS = [
5330 29859cb7 Iustin Pop
    "relocate_from",
5331 29859cb7 Iustin Pop
    ]
5332 d1c2dd75 Iustin Pop
5333 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
5334 72737a7f Iustin Pop
    self.lu = lu
5335 d1c2dd75 Iustin Pop
    # init buffer variables
5336 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
5337 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
5338 29859cb7 Iustin Pop
    self.mode = mode
5339 29859cb7 Iustin Pop
    self.name = name
5340 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
5341 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
5342 29859cb7 Iustin Pop
    self.relocate_from = None
5343 27579978 Iustin Pop
    # computed fields
5344 27579978 Iustin Pop
    self.required_nodes = None
5345 d1c2dd75 Iustin Pop
    # init result fields
5346 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
5347 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5348 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
5349 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
5350 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
5351 29859cb7 Iustin Pop
    else:
5352 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
5353 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
5354 d1c2dd75 Iustin Pop
    for key in kwargs:
5355 29859cb7 Iustin Pop
      if key not in keyset:
5356 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
5357 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5358 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
5359 29859cb7 Iustin Pop
    for key in keyset:
5360 d1c2dd75 Iustin Pop
      if key not in kwargs:
5361 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
5362 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5363 d1c2dd75 Iustin Pop
    self._BuildInputData()
5364 d1c2dd75 Iustin Pop
5365 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
5366 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
5367 d1c2dd75 Iustin Pop

5368 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
5369 d1c2dd75 Iustin Pop

5370 d1c2dd75 Iustin Pop
    """
5371 72737a7f Iustin Pop
    cfg = self.lu.cfg
5372 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
5373 d1c2dd75 Iustin Pop
    # cluster data
5374 d1c2dd75 Iustin Pop
    data = {
5375 d1c2dd75 Iustin Pop
      "version": 1,
5376 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
5377 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
5378 e69d05fd Iustin Pop
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
5379 d1c2dd75 Iustin Pop
      # we don't have job IDs
5380 d61df03e Iustin Pop
      }
5381 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
5382 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
5383 6286519f Iustin Pop
5384 d1c2dd75 Iustin Pop
    # node data
5385 d1c2dd75 Iustin Pop
    node_results = {}
5386 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
5387 8cc7e742 Guido Trotter
5388 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5389 8cc7e742 Guido Trotter
      hypervisor = self.hypervisor
5390 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
5391 8cc7e742 Guido Trotter
      hypervisor = cfg.GetInstanceInfo(self.name).hypervisor
5392 8cc7e742 Guido Trotter
5393 72737a7f Iustin Pop
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
5394 8cc7e742 Guido Trotter
                                           hypervisor)
5395 18640d69 Guido Trotter
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
5396 18640d69 Guido Trotter
                       cluster_info.enabled_hypervisors)
5397 d1c2dd75 Iustin Pop
    for nname in node_list:
5398 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
5399 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
5400 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
5401 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
5402 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
5403 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
5404 d1c2dd75 Iustin Pop
        if attr not in remote_info:
5405 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
5406 d1c2dd75 Iustin Pop
                                   (nname, attr))
5407 d1c2dd75 Iustin Pop
        try:
5408 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
5409 d1c2dd75 Iustin Pop
        except ValueError, err:
5410 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
5411 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
5412 6286519f Iustin Pop
      # compute memory used by primary instances
5413 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
5414 338e51e8 Iustin Pop
      for iinfo, beinfo in i_list:
5415 6286519f Iustin Pop
        if iinfo.primary_node == nname:
5416 338e51e8 Iustin Pop
          i_p_mem += beinfo[constants.BE_MEMORY]
5417 18640d69 Guido Trotter
          if iinfo.name not in node_iinfo[nname]:
5418 18640d69 Guido Trotter
            i_used_mem = 0
5419 18640d69 Guido Trotter
          else:
5420 18640d69 Guido Trotter
            i_used_mem = int(node_iinfo[nname][iinfo.name]['memory'])
5421 18640d69 Guido Trotter
          i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
5422 18640d69 Guido Trotter
          remote_info['memory_free'] -= max(0, i_mem_diff)
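          # this reserves the full configured memory of the instance: when
          # it currently uses less than its BE_MEMORY setting, the
          # difference is subtracted from the node's reported free memory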
5423 18640d69 Guido Trotter
5424 6286519f Iustin Pop
          if iinfo.status == "up":
5425 338e51e8 Iustin Pop
            i_p_up_mem += beinfo[constants.BE_MEMORY]
5426 6286519f Iustin Pop
5427 b2662e7f Iustin Pop
      # build the per-node result structure for the allocator input
5428 d1c2dd75 Iustin Pop
      pnr = {
5429 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
5430 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
5431 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
5432 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
5433 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
5434 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
5435 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
5436 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
5437 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
5438 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
5439 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
5440 d1c2dd75 Iustin Pop
        }
5441 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
5442 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
5443 d1c2dd75 Iustin Pop
5444 d1c2dd75 Iustin Pop
    # instance data
5445 d1c2dd75 Iustin Pop
    instance_data = {}
5446 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
5447 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
5448 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
5449 d1c2dd75 Iustin Pop
      pir = {
5450 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
5451 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
5452 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
5453 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
5454 d1c2dd75 Iustin Pop
        "os": iinfo.os,
5455 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
5456 d1c2dd75 Iustin Pop
        "nics": nic_data,
5457 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
5458 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
5459 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
5460 d1c2dd75 Iustin Pop
        }
5461 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
5462 d61df03e Iustin Pop
5463 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
5464 d61df03e Iustin Pop
5465 d1c2dd75 Iustin Pop
    self.in_data = data
5466 d61df03e Iustin Pop
5467 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
5468 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
5469 d61df03e Iustin Pop

5470 d1c2dd75 Iustin Pop
    This, in combination with _ComputeClusterData, will create the
5471 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5472 d61df03e Iustin Pop

5473 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5474 d1c2dd75 Iustin Pop
    done.
5475 d61df03e Iustin Pop

5476 d1c2dd75 Iustin Pop
    """
5477 d1c2dd75 Iustin Pop
    data = self.in_data
5478 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
5479 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
5480 d1c2dd75 Iustin Pop
5481 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
5482 d1c2dd75 Iustin Pop
5483 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
5484 27579978 Iustin Pop
      self.required_nodes = 2
5485 27579978 Iustin Pop
    else:
5486 27579978 Iustin Pop
      self.required_nodes = 1
5487 d1c2dd75 Iustin Pop
    request = {
5488 d1c2dd75 Iustin Pop
      "type": "allocate",
5489 d1c2dd75 Iustin Pop
      "name": self.name,
5490 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
5491 d1c2dd75 Iustin Pop
      "tags": self.tags,
5492 d1c2dd75 Iustin Pop
      "os": self.os,
5493 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
5494 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
5495 d1c2dd75 Iustin Pop
      "disks": self.disks,
5496 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
5497 d1c2dd75 Iustin Pop
      "nics": self.nics,
5498 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5499 d1c2dd75 Iustin Pop
      }
5500 d1c2dd75 Iustin Pop
    data["request"] = request
5501 298fe380 Iustin Pop
5502 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
5503 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
5504 298fe380 Iustin Pop

5505 d1c2dd75 Iustin Pop
    This, in combination with _ComputeClusterData, will create the
5506 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5507 d61df03e Iustin Pop

5508 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5509 d1c2dd75 Iustin Pop
    done.
5510 d61df03e Iustin Pop

5511 d1c2dd75 Iustin Pop
    """
5512 72737a7f Iustin Pop
    instance = self.lu.cfg.GetInstanceInfo(self.name)
5513 27579978 Iustin Pop
    if instance is None:
5514 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
5515 27579978 Iustin Pop
                                   " IAllocator" % self.name)
5516 27579978 Iustin Pop
5517 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5518 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
5519 27579978 Iustin Pop
5520 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
5521 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
5522 2a139bb0 Iustin Pop
5523 27579978 Iustin Pop
    self.required_nodes = 1
5524 dafc7302 Guido Trotter
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
5525 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
5526 27579978 Iustin Pop
5527 d1c2dd75 Iustin Pop
    request = {
5528 2a139bb0 Iustin Pop
      "type": "relocate",
5529 d1c2dd75 Iustin Pop
      "name": self.name,
5530 27579978 Iustin Pop
      "disk_space_total": disk_space,
5531 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5532 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
5533 d1c2dd75 Iustin Pop
      }
5534 27579978 Iustin Pop
    self.in_data["request"] = request
5535 d61df03e Iustin Pop
5536 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
5537 d1c2dd75 Iustin Pop
    """Build input data structures.
5538 d61df03e Iustin Pop

5539 d1c2dd75 Iustin Pop
    """
5540 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
5541 d61df03e Iustin Pop
5542 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5543 d1c2dd75 Iustin Pop
      self._AddNewInstance()
5544 d1c2dd75 Iustin Pop
    else:
5545 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
5546 d61df03e Iustin Pop
5547 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
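    # At this point self.in_data is a plain dict with the top-level keys
    # "version", "cluster_name", "cluster_tags", "enable_hypervisors",
    # "nodes", "instances" and "request"; self.in_text is its serialized
    # form, which is what gets passed to the external allocator script.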
5548 d61df03e Iustin Pop
5549 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
5550 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
5551 298fe380 Iustin Pop

5552 d1c2dd75 Iustin Pop
    """
5553 72737a7f Iustin Pop
    if call_fn is None:
5554 72737a7f Iustin Pop
      call_fn = self.lu.rpc.call_iallocator_runner
5555 d1c2dd75 Iustin Pop
    data = self.in_text
5556 298fe380 Iustin Pop
5557 72737a7f Iustin Pop
    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
5558 298fe380 Iustin Pop
5559 43f5ea7a Guido Trotter
    if not isinstance(result, (list, tuple)) or len(result) != 4:
5560 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
5561 8d528b7c Iustin Pop
5562 8d528b7c Iustin Pop
    rcode, stdout, stderr, fail = result
5563 8d528b7c Iustin Pop
5564 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
5565 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
5566 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
5567 38206f3c Iustin Pop
      raise errors.OpExecError("Instance allocator call failed: %s,"
5568 38206f3c Iustin Pop
                               " output: %s" % (fail, stdout+stderr))
5569 8d528b7c Iustin Pop
    self.out_text = stdout
5570 d1c2dd75 Iustin Pop
    if validate:
5571 d1c2dd75 Iustin Pop
      self._ValidateResult()
5572 298fe380 Iustin Pop
5573 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5574 d1c2dd75 Iustin Pop
    """Process the allocator results.
5575 538475ca Iustin Pop

5576 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5577 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5578 538475ca Iustin Pop

5579 d1c2dd75 Iustin Pop
    """
5580 d1c2dd75 Iustin Pop
    try:
5581 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5582 d1c2dd75 Iustin Pop
    except Exception, err:
5583 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5584 d1c2dd75 Iustin Pop
5585 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5586 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5587 538475ca Iustin Pop
5588 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5589 d1c2dd75 Iustin Pop
      if key not in rdict:
5590 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5591 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5592 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5593 538475ca Iustin Pop
5594 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5595 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5596 d1c2dd75 Iustin Pop
                               " is not a list")
5597 d1c2dd75 Iustin Pop
    self.out_data = rdict
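    # Illustrative sample reply (hypothetical values): the smallest document
    # that satisfies the checks above would look roughly like
    #
    #   {
    #     "success": true,
    #     "info": "allocation successful",
    #     "nodes": ["node2.example.com"]
    #   }
    #
    # after which self.success, self.info and self.nodes carry these values
    # and self.out_data keeps the complete parsed dictionary.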
5598 538475ca Iustin Pop
5599 538475ca Iustin Pop
5600 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
5601 d61df03e Iustin Pop
  """Run allocator tests.
5602 d61df03e Iustin Pop

5603 d61df03e Iustin Pop
  This LU runs the allocator tests.
5604 d61df03e Iustin Pop

5605 d61df03e Iustin Pop
  """
5606 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
5607 d61df03e Iustin Pop
5608 d61df03e Iustin Pop
  def CheckPrereq(self):
5609 d61df03e Iustin Pop
    """Check prerequisites.
5610 d61df03e Iustin Pop

5611 d61df03e Iustin Pop
    This checks the opcode parameters depending on the direction and mode
    of the test.
5612 d61df03e Iustin Pop

5613 d61df03e Iustin Pop
    """
5614 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5615 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
5616 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
5617 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
5618 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
5619 d61df03e Iustin Pop
                                     attr)
5620 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
5621 d61df03e Iustin Pop
      if iname is not None:
5622 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
5623 d61df03e Iustin Pop
                                   iname)
5624 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
5625 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'")
5626 d61df03e Iustin Pop
      for row in self.op.nics:
5627 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
5628 d61df03e Iustin Pop
            "mac" not in row or
5629 d61df03e Iustin Pop
            "ip" not in row or
5630 d61df03e Iustin Pop
            "bridge" not in row):
5631 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
5632 d61df03e Iustin Pop
                                     " 'nics' parameter")
5633 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
5634 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'")
5635 298fe380 Iustin Pop
      if len(self.op.disks) != 2:
5636 298fe380 Iustin Pop
        raise errors.OpPrereqError("Only two-disk configurations supported")
5637 d61df03e Iustin Pop
      for row in self.op.disks:
5638 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
5639 d61df03e Iustin Pop
            "size" not in row or
5640 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
5641 d61df03e Iustin Pop
            "mode" not in row or
5642 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
5643 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
5644 d61df03e Iustin Pop
                                     " 'disks' parameter")
5645 8cc7e742 Guido Trotter
      if self.op.hypervisor is None:
5646 8cc7e742 Guido Trotter
        self.op.hypervisor = self.cfg.GetHypervisorType()
5647 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
5648 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
5649 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
5650 d61df03e Iustin Pop
      fname = self.cfg.ExpandInstanceName(self.op.name)
5651 d61df03e Iustin Pop
      if fname is None:
5652 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
5653 d61df03e Iustin Pop
                                   self.op.name)
5654 d61df03e Iustin Pop
      self.op.name = fname
5655 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
5656 d61df03e Iustin Pop
    else:
5657 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
5658 d61df03e Iustin Pop
                                 self.op.mode)
5659 d61df03e Iustin Pop
5660 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
5661 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
5662 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing allocator name")
5663 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
5664 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
5665 d61df03e Iustin Pop
                                 self.op.direction)
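    # Editorial example (hypothetical values): a parameter set that passes
    # the allocation-mode checks above would look roughly like
    #
    #   direction=constants.IALLOCATOR_DIR_IN,
    #   mode=constants.IALLOCATOR_MODE_ALLOC,
    #   name="new-instance.example.com",
    #   mem_size=1024,
    #   disks=[{"size": 1024, "mode": "w"}, {"size": 4096, "mode": "w"}],
    #   disk_template=constants.DT_DRBD8,
    #   os="debian-etch",
    #   tags=[],
    #   nics=[{"mac": "auto", "ip": None, "bridge": "xen-br0"}],
    #   vcpus=1,
    #
    # i.e. exactly two disk dictionaries with integer sizes and 'r'/'w'
    # modes, and nic dictionaries providing the "mac", "ip" and "bridge"
    # keys.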
5666 d61df03e Iustin Pop
5667 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
5668 d61df03e Iustin Pop
    """Run the allocator test.
5669 d61df03e Iustin Pop

5670 d61df03e Iustin Pop
    """
5671 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5672 72737a7f Iustin Pop
      ial = IAllocator(self,
5673 29859cb7 Iustin Pop
                       mode=self.op.mode,
5674 29859cb7 Iustin Pop
                       name=self.op.name,
5675 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
5676 29859cb7 Iustin Pop
                       disks=self.op.disks,
5677 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
5678 29859cb7 Iustin Pop
                       os=self.op.os,
5679 29859cb7 Iustin Pop
                       tags=self.op.tags,
5680 29859cb7 Iustin Pop
                       nics=self.op.nics,
5681 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
5682 8cc7e742 Guido Trotter
                       hypervisor=self.op.hypervisor,
5683 29859cb7 Iustin Pop
                       )
5684 29859cb7 Iustin Pop
    else:
5685 72737a7f Iustin Pop
      ial = IAllocator(self,
5686 29859cb7 Iustin Pop
                       mode=self.op.mode,
5687 29859cb7 Iustin Pop
                       name=self.op.name,
5688 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
5689 29859cb7 Iustin Pop
                       )
5690 d61df03e Iustin Pop
5691 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
5692 d1c2dd75 Iustin Pop
      result = ial.in_text
5693 298fe380 Iustin Pop
    else:
5694 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
5695 d1c2dd75 Iustin Pop
      result = ial.out_text
5696 298fe380 Iustin Pop
    return result
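    # Usage sketch (hypothetical, for illustration only): assuming an opcode
    # class named opcodes.OpTestAllocator carrying the fields checked in
    # CheckPrereq, a caller could exercise the "in" direction with
    #
    #   op = opcodes.OpTestAllocator(direction=constants.IALLOCATOR_DIR_IN,
    #                                mode=constants.IALLOCATOR_MODE_RELOC,
    #                                name="instance1.example.com",
    #                                allocator=None)
    #
    # and get back ial.in_text, i.e. the serialized request (useful for
    # inspecting the input format), while IALLOCATOR_DIR_OUT runs the
    # allocator named in self.op.allocator and returns its raw, unvalidated
    # output (note the validate=False above).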