Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ fc0fe88c

History | View | Annotate | Download (214.7 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 ffa1c0dc Iustin Pop
import logging
34 74409b12 Iustin Pop
import copy
35 4b7735f9 Iustin Pop
import random
36 a8083063 Iustin Pop
37 a8083063 Iustin Pop
from ganeti import ssh
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 6048c986 Guido Trotter
from ganeti import locking
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 a8083063 Iustin Pop
from ganeti import opcodes
45 8d14b30d Iustin Pop
from ganeti import serializer
46 112f18a5 Iustin Pop
from ganeti import ssconf
47 d61df03e Iustin Pop
48 d61df03e Iustin Pop
49 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  # Hooks path for this LU; None means BuildHooksEnv is never called
  HPATH = None
  # Hooks type (e.g. cluster/node/instance); see constants.HTYPE_*
  HTYPE = None
  # List of opcode attribute names that must be present (non-None)
  _OP_REQP = []
  # Whether this LU must run under the Big Ganeti Lock (exclusive)
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    @param processor: the mcpu processor driving this LU; also supplies
        the LogWarning/LogInfo callbacks
    @param op: the opcode this LU executes; attributes listed in
        _OP_REQP are validated to be non-None here
    @param context: execution context providing the cluster config (cfg)
    @param rpc: RPC runner used to talk to the nodes

    @raise errors.OpPrereqError: if a required opcode parameter is missing

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    # Validate required opcode parameters before any further processing
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    # Syntactic validation hook; subclasses may override CheckArguments
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object, creating it lazily on first access.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  # Lazily-constructed SshRunner; shadows the module-level 'ssh' import
  # within instances of this class
  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separate is better because:

      - ExpandNames is left as as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possible
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    @param feedback_fn: callable used to report progress back to the caller

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    @raise errors.OpPrereqError: if the instance name cannot be expanded
        (i.e. the instance is not known to the configuration)

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      # Guard against double-declaration of instance-level locks
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    # LOCKS_REPLACE overwrites any previous node locks; LOCKS_APPEND adds
    # the instances' nodes to whatever was already declared
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # Consume the flag so a second (unexpected) call trips the assert above
    del self.recalculate_locks[locking.LEVEL_NODE]
327 c4a2fee1 Guido Trotter
328 a8083063 Iustin Pop
329 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  # A None HPATH means BuildHooksEnv is never invoked for subclasses
  HPATH = None
  HTYPE = None
338 a8083063 Iustin Pop
339 a8083063 Iustin Pop
340 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Note that despite what older documentation may suggest, this helper
  must be given a non-empty list: an empty (or otherwise false) value
  is a programming error.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: non-empty list of (possibly short) node names
  @rtype: list
  @return: the expanded node names, sorted
  @raise errors.OpPrereqError: if the nodes parameter is of the wrong type
      or contains an unknown node name
  @raise errors.ProgrammerError: if called with an empty node list

  """
  # Guard clauses: type first, then non-emptiness
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  expanded = []
  for short_name in nodes:
    full_name = lu.cfg.ExpandNodeName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % short_name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
367 3312b702 Iustin Pop
368 3312b702 Iustin Pop
369 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names, or a false value to select
      every instance known to the configuration
  @rtype: list
  @return: the expanded instance names, sorted
  @raise errors.OpPrereqError: if the instances parameter is of the wrong
      type, or if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  # No explicit selection means "all instances"
  if not instances:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  expanded = []
  for short_name in instances:
    full_name = lu.cfg.ExpandInstanceName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such instance name '%s'" % short_name)
    expanded.append(full_name)
  return utils.NiceSort(expanded)
397 dcb93971 Michael Hanselmann
398 dcb93971 Michael Hanselmann
399 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  The union of the static and dynamic field sets defines what is legal;
  anything in C{selected} outside that union is reported as an error.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @param selected: the field names requested by the caller
  @raise errors.OpPrereqError: if any selected field is unknown

  """
  all_fields = utils.FieldSet()
  for fieldset in (static, dynamic):
    all_fields.Extend(fieldset)

  unknown = all_fields.NonMatching(selected)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown))
416 dcb93971 Michael Hanselmann
417 dcb93971 Michael Hanselmann
418 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
419 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
420 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
421 e4376078 Iustin Pop

422 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
423 e4376078 Iustin Pop

424 e4376078 Iustin Pop
  @type name: string
425 e4376078 Iustin Pop
  @param name: the name of the instance
426 e4376078 Iustin Pop
  @type primary_node: string
427 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
428 e4376078 Iustin Pop
  @type secondary_nodes: list
429 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
430 e4376078 Iustin Pop
  @type os_type: string
431 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
432 e4376078 Iustin Pop
  @type status: string
433 e4376078 Iustin Pop
  @param status: the desired status of the instances
434 e4376078 Iustin Pop
  @type memory: string
435 e4376078 Iustin Pop
  @param memory: the memory size of the instance
436 e4376078 Iustin Pop
  @type vcpus: string
437 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
438 e4376078 Iustin Pop
  @type nics: list
439 e4376078 Iustin Pop
  @param nics: list of tuples (ip, bridge, mac) representing
440 e4376078 Iustin Pop
      the NICs the instance  has
441 e4376078 Iustin Pop
  @rtype: dict
442 e4376078 Iustin Pop
  @return: the hook environment for this instance
443 ecb215b5 Michael Hanselmann

444 396e1b78 Michael Hanselmann
  """
445 396e1b78 Michael Hanselmann
  env = {
446 0e137c28 Iustin Pop
    "OP_TARGET": name,
447 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
448 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
449 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
450 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
451 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
452 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
453 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
454 396e1b78 Michael Hanselmann
  }
455 396e1b78 Michael Hanselmann
456 396e1b78 Michael Hanselmann
  if nics:
457 396e1b78 Michael Hanselmann
    nic_count = len(nics)
458 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
459 396e1b78 Michael Hanselmann
      if ip is None:
460 396e1b78 Michael Hanselmann
        ip = ""
461 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
462 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
463 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
464 396e1b78 Michael Hanselmann
  else:
465 396e1b78 Michael Hanselmann
    nic_count = 0
466 396e1b78 Michael Hanselmann
467 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
468 396e1b78 Michael Hanselmann
469 396e1b78 Michael Hanselmann
  return env
470 396e1b78 Michael Hanselmann
471 396e1b78 Michael Hanselmann
472 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Extracts the relevant fields from an instance object (plus its cluster
  backend parameters) and delegates to L{_BuildInstanceHookEnv}.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  # Backend parameters (memory, vcpus) filled in from cluster defaults
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # NOTE(review): 'status' duplicates instance.os, same as 'os_type'
    # above — this looks like a copy-paste bug; the docstring of
    # _BuildInstanceHookEnv says status is "the desired status of the
    # instance", so this should presumably be the instance's run state
    # attribute. Confirm against objects.Instance before changing.
    'status': instance.os,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
501 396e1b78 Michael Hanselmann
502 396e1b78 Michael Hanselmann
503 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
504 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
505 bf6929a2 Alexander Schreiber

506 bf6929a2 Alexander Schreiber
  """
507 bf6929a2 Alexander Schreiber
  # check bridges existance
508 bf6929a2 Alexander Schreiber
  brlist = [nic.bridge for nic in instance.nics]
509 781de953 Iustin Pop
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
510 781de953 Iustin Pop
  result.Raise()
511 781de953 Iustin Pop
  if not result.data:
512 781de953 Iustin Pop
    raise errors.OpPrereqError("One or more target bridges %s does not"
513 bf6929a2 Alexander Schreiber
                               " exist on destination node '%s'" %
514 bf6929a2 Alexander Schreiber
                               (brlist, instance.primary_node))
515 bf6929a2 Alexander Schreiber
516 bf6929a2 Alexander Schreiber
517 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    The cluster may only be destroyed once it has been reduced to the
    master node alone, with no instances left.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master_node = self.cfg.GetMasterNode()

    nodes = self.cfg.GetNodeList()
    if len(nodes) != 1 or nodes[0] != master_node:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodes) - 1))
    instances = self.cfg.GetInstanceList()
    if instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Stops the master role on the master node, backs up the cluster's SSH
    key pair and returns the (former) master node name.

    """
    master_node = self.cfg.GetMasterNode()
    stop_result = self.rpc.call_node_stop_master(master_node, False)
    stop_result.Raise()
    if not stop_result.data:
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    # Keep a backup copy of both halves of the cluster key pair
    for keyfile in (priv_key, pub_key):
      utils.CreateBackup(keyfile)
    return master_node
555 a8083063 Iustin Pop
556 a8083063 Iustin Pop
557 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  # the opcode must carry a 'skip_checks' field (list of checks to skip)
  _OP_REQP = ["skip_checks"]
  # NOTE(review): presumably this means the Big Ganeti Lock is not
  # required for this LU -- confirm against the LogicalUnit base class
  REQ_BGL = False

  def ExpandNames(self):
    # Verification only reads cluster state: request locks on every node
    # and every instance, and mark each locking level as shared (the 1).
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
572 a8083063 Iustin Pop
573 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @return: True if any problem was detected on this node, False if all
        checks passed (errors are reported through feedback_fn)

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version; a version mismatch (or no version at all)
    # makes all further per-node data untrustworthy, hence the early returns
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    vglist = node_result.get(constants.NV_VGLIST, None)
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # master candidates must hold correct copies of every listed file;
    # other nodes must only hold the non-master files

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any
    # NOTE(review): the loop variables below rebind 'node', shadowing the
    # node name set at the top of this method

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    # per-hypervisor verification results: a non-None value is an error
    # string from that hypervisor (note: reported but does not set 'bad')
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))
    return bad
686 a8083063 Iustin Pop
687 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
688 c5705f58 Guido Trotter
                      node_instance, feedback_fn):
689 a8083063 Iustin Pop
    """Verify an instance.
690 a8083063 Iustin Pop

691 a8083063 Iustin Pop
    This function checks to see if the required block devices are
692 a8083063 Iustin Pop
    available on the instance's node.
693 a8083063 Iustin Pop

694 a8083063 Iustin Pop
    """
695 a8083063 Iustin Pop
    bad = False
696 a8083063 Iustin Pop
697 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
698 a8083063 Iustin Pop
699 a8083063 Iustin Pop
    node_vol_should = {}
700 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
701 a8083063 Iustin Pop
702 a8083063 Iustin Pop
    for node in node_vol_should:
703 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
704 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
705 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
706 a8083063 Iustin Pop
                          (volume, node))
707 a8083063 Iustin Pop
          bad = True
708 a8083063 Iustin Pop
709 a8083063 Iustin Pop
    if not instanceconfig.status == 'down':
710 a872dae6 Guido Trotter
      if (node_current not in node_instance or
711 a872dae6 Guido Trotter
          not instance in node_instance[node_current]):
712 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
713 a8083063 Iustin Pop
                        (instance, node_current))
714 a8083063 Iustin Pop
        bad = True
715 a8083063 Iustin Pop
716 a8083063 Iustin Pop
    for node in node_instance:
717 a8083063 Iustin Pop
      if (not node == node_current):
718 a8083063 Iustin Pop
        if instance in node_instance[node]:
719 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
720 a8083063 Iustin Pop
                          (instance, node))
721 a8083063 Iustin Pop
          bad = True
722 a8083063 Iustin Pop
723 6a438c98 Michael Hanselmann
    return bad
724 a8083063 Iustin Pop
725 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
726 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
727 a8083063 Iustin Pop

728 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
729 a8083063 Iustin Pop
    reported as unknown.
730 a8083063 Iustin Pop

731 a8083063 Iustin Pop
    """
732 a8083063 Iustin Pop
    bad = False
733 a8083063 Iustin Pop
734 a8083063 Iustin Pop
    for node in node_vol_is:
735 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
736 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
737 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
738 a8083063 Iustin Pop
                      (volume, node))
739 a8083063 Iustin Pop
          bad = True
740 a8083063 Iustin Pop
    return bad
741 a8083063 Iustin Pop
742 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
743 a8083063 Iustin Pop
    """Verify the list of running instances.
744 a8083063 Iustin Pop

745 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
746 a8083063 Iustin Pop

747 a8083063 Iustin Pop
    """
748 a8083063 Iustin Pop
    bad = False
749 a8083063 Iustin Pop
    for node in node_instance:
750 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
751 a8083063 Iustin Pop
        if runninginstance not in instancelist:
752 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
753 a8083063 Iustin Pop
                          (runninginstance, node))
754 a8083063 Iustin Pop
          bad = True
755 a8083063 Iustin Pop
    return bad
756 a8083063 Iustin Pop
757 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
758 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
759 2b3b6ddd Guido Trotter

760 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
761 2b3b6ddd Guido Trotter
    was primary for.
762 2b3b6ddd Guido Trotter

763 2b3b6ddd Guido Trotter
    """
764 2b3b6ddd Guido Trotter
    bad = False
765 2b3b6ddd Guido Trotter
766 2b3b6ddd Guido Trotter
    for node, nodeinfo in node_info.iteritems():
767 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
768 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to should a single
769 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
770 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
771 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
772 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
773 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
774 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
775 2b3b6ddd Guido Trotter
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
776 2b3b6ddd Guido Trotter
        needed_mem = 0
777 2b3b6ddd Guido Trotter
        for instance in instances:
778 338e51e8 Iustin Pop
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
779 c0f2b229 Iustin Pop
          if bep[constants.BE_AUTO_BALANCE]:
780 3924700f Iustin Pop
            needed_mem += bep[constants.BE_MEMORY]
781 2b3b6ddd Guido Trotter
        if nodeinfo['mfree'] < needed_mem:
782 2b3b6ddd Guido Trotter
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
783 2b3b6ddd Guido Trotter
                      " failovers should node %s fail" % (node, prinode))
784 2b3b6ddd Guido Trotter
          bad = True
785 2b3b6ddd Guido Trotter
    return bad
786 2b3b6ddd Guido Trotter
787 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Turns the caller-supplied list of checks to skip into a frozenset and
    verifies that every entry names a known optional check.

    """
    requested_skips = frozenset(self.op.skip_checks)
    self.skip_set = requested_skips
    if not requested_skips.issubset(constants.VERIFY_OPTIONAL_CHECKS):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
797 a8083063 Iustin Pop
798 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are only run in the post phase; a hook failure is
    logged in the verify output and makes the verification fail.

    """
    # TODO: populate the environment with useful information for verify hooks
    hook_env = {}
    node_names = self.cfg.GetNodeList()
    return hook_env, [], node_names
809 d8fff41c Guido Trotter
810 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Gathers verification data from every node in a single RPC, then checks
    nodes, instances, orphan volumes/instances and (unless skipped) N+1
    memory redundancy.

    @param feedback_fn: function used to report progress and errors
    @return: True if no problems were found, False otherwise

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.append(constants.SSL_CERT_FILE)
    file_names.extend(master_files)

    local_checksums = utils.FingerprintFiles(file_names)

    # single RPC collecting all verification data from every node at once
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: nodelist,
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo],
      constants.NV_LVLIST: vg_name,
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VGLIST: None,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())

    cluster = self.cfg.GetClusterInfo()
    master_node = self.cfg.GetMasterNode()
    for node_i in nodeinfo:
      node = node_i.name
      nresult = all_nvinfo[node].data

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      else:
        ntype = "regular"
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      if all_nvinfo[node].failed or not isinstance(nresult, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      result = self._VerifyNode(node_i, file_names, local_checksums,
                                nresult, feedback_fn, master_files)
      bad = bad or result

      # a string LV answer is an error message from the node; a dict is
      # the actual volume data
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
      if isinstance(lvdata, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, lvdata.encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(lvdata, dict):
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = lvdata

      # node_instance
      idata = nresult.get(constants.NV_INSTANCELIST, None)
      if not isinstance(idata, list):
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
                    (node,))
        bad = True
        continue

      node_instance[node] = idata

      # node_info
      # NOTE(review): this rebinds 'nodeinfo', shadowing the node-object
      # list iterated by the enclosing for loop; iteration is unaffected
      # (the loop holds its own iterator) but the name reuse is confusing
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      # record this instance in each secondary's sinst/sinst-by-pnode maps
      # (used below by the N+1 memory check)
      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    return not bad
999 a8083063 Iustin Pop
1000 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res.failed or res.data is False or not isinstance(res.data, list):
            feedback_fn("    Communication failure in hooks execution")
            lu_result = 1
            continue
          for script, hkr, output in res.data:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

    # The result must be returned for every phase: previously it was only
    # returned for the POST phase, so a PRE-phase call silently discarded
    # lu_result and returned None.
    return lu_result
1045 d8fff41c Guido Trotter
1046 a8083063 Iustin Pop
1047 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # read-only verification: shared locks on all nodes and instances
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of four elements
    @return: a tuple of (unreachable nodes, per-node LVM error messages,
        instances with offline LVs, missing LVs keyed by instance name)

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # only running, net-mirrored instances are relevant for this check
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]
      if lvs.failed:
        self.LogWarning("Connection to node %s failed: %s" %
                        (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
        # FIX: a string result means LVM failure on the node, so there is
        # no LV data to iterate over; without this 'continue' the loop
        # below would call .iteritems() on a str and raise AttributeError
        continue
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      # LVs that are present but offline mean a (partially) broken instance
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1129 2c95a8d4 Iustin Pop
1130 2c95a8d4 Iustin Pop
1131 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    # hooks only run on the master node
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # refuse to take over an IP that already answers on the network
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          # FIX: the original referenced an undefined name 'fname' here,
          # raising NameError whenever an upload failed; log the file that
          # was actually being distributed
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)

    finally:
      # always try to restore the master role, even if the rename failed
      result = self.rpc.call_node_start_master(master, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")
1208 07bd8a51 Iustin Pop
1209 07bd8a51 Iustin Pop
1210 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or any disk below it is lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the root of the disk tree to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  # descend first: any lvm-based child makes the whole tree lvm-based
  for child in (disk.children or []):
    if _RecursiveCheckIfLVMBased(child):
      return True
  return disk.dev_type == constants.LD_LV
1224 8084f9f6 Manuel Franceschini
1225 8084f9f6 Manuel Franceschini
1226 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  Handles changes to the volume group name, hypervisor list and
  parameters, backend parameters and the master candidate pool size.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckParameters(self):
    """Check parameters

    Normalizes and validates the optional candidate_pool_size opcode
    attribute; raises OpPrereqError for non-integer or < 1 values.

    """
    # the attribute may be missing entirely on the opcode; normalize it
    # to None so later code only needs an "is not None" test
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    # hooks run on the master node only, pre and post
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    # an empty-but-not-None vg_name means "disable lvm storage", which is
    # only allowed if no instance currently uses lvm-based disks
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.CheckBEParams(self.op.beparams)
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      # merge the per-hypervisor overrides into the current parameters
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      if self.op.vg_name != self.cfg.GetVGName():
        self.cfg.SetVGName(self.op.vg_name)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size

    self.cfg.Update(self.cluster)

    # we want to update nodes after the cluster so that if any errors
    # happen, we have recorded and saved the cluster info
    if self.op.candidate_pool_size is not None:
      node_info = self.cfg.GetAllNodesInfo().values()
      num_candidates = len([node for node in node_info
                            if node.master_candidate])
      num_nodes = len(node_info)  # NOTE(review): currently unused
      if num_candidates < self.op.candidate_pool_size:
        # promote randomly-chosen non-candidate nodes until the new pool
        # size is reached
        random.shuffle(node_info)
        for node in node_info:
          if num_candidates >= self.op.candidate_pool_size:
            break
          if node.master_candidate:
            continue
          node.master_candidate = True
          self.LogInfo("Promoting node %s to master candidate", node.name)
          self.cfg.Update(node)
          self.context.ReaddNode(node)
          num_candidates += 1
      elif num_candidates > self.op.candidate_pool_size:
        # no automatic demotion is done; only inform the user
        self.LogInfo("Note: more nodes are candidates (%d) than the new value"
                     " of candidate_pool_size (%d)" %
                     (num_candidates, self.op.candidate_pool_size))
1381 4b7735f9 Iustin Pop
1382 8084f9f6 Manuel Franceschini
1383 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  @param lu: the logical unit on whose behalf we operate (provides the
      cfg, rpc and logging helpers used here)
  @param instance: the instance whose disks are polled; only its primary
      node is queried
  @type oneshot: boolean
  @param oneshot: if True, poll the status once instead of waiting for
      the sync to complete
  @type unlock: boolean
  @param unlock: not used by this function
  @rtype: boolean
  @return: True if no device was left in a degraded state, False otherwise

  """
  if not instance.disks:
    # nothing to synchronize
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      # tolerate transient RPC failures, but give up after 10 in a row
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    retries = 0
    for i in range(len(rstats)):
      # mstat is (perc_done, est_time, is_degraded, ldisk) per disk, or
      # None when the node could not compute the status for that disk
      mstat = rstats[i]
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # degraded devices that report no sync progress count towards the
      # cumulative degraded state
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a sync percentage means this device is still resyncing
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # sleep proportionally to the estimated remaining time, capped at 60s
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1440 a8083063 Iustin Pop
1441 a8083063 Iustin Pop
1442 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1443 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1444 a8083063 Iustin Pop

1445 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1446 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1447 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1448 0834c866 Iustin Pop

1449 a8083063 Iustin Pop
  """
1450 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1451 0834c866 Iustin Pop
  if ldisk:
1452 0834c866 Iustin Pop
    idx = 6
1453 0834c866 Iustin Pop
  else:
1454 0834c866 Iustin Pop
    idx = 5
1455 a8083063 Iustin Pop
1456 a8083063 Iustin Pop
  result = True
1457 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1458 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1459 781de953 Iustin Pop
    if rstats.failed or not rstats.data:
1460 9a4f63d1 Iustin Pop
      logging.warning("Node %s: disk degraded, not found or node down", node)
1461 a8083063 Iustin Pop
      result = False
1462 a8083063 Iustin Pop
    else:
1463 781de953 Iustin Pop
      result = result and (not rstats.data[idx])
1464 a8083063 Iustin Pop
  if dev.children:
1465 a8083063 Iustin Pop
    for child in dev.children:
1466 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1467 a8083063 Iustin Pop
1468 a8083063 Iustin Pop
  return result
1469 a8083063 Iustin Pop
1470 a8083063 Iustin Pop
1471 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    # selective queries are not implemented; only the full list works
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # acquire all node locks, in shared mode
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remap a per-node result map into a per-OS, per-node dictionary.

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary keyed by OS name; each value is another map
        keyed by node name whose values are lists of OS objects

    """
    per_os = {}
    for node_name, node_result in rlist.iteritems():
      # nodes that failed or returned no data are skipped entirely
      if node_result.failed or not node_result.data:
        continue
      for os_obj in node_result.data:
        if os_obj.name not in per_os:
          # first occurrence of this OS: pre-seed an empty list for every
          # node, so nodes that did not report it are still represented
          per_os[os_obj.name] = dict((nname, []) for nname in node_list)
        per_os[os_obj.name][node_name].append(os_obj)
    return per_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = self.rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    per_os = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in per_os.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          # per-node list of (status, path) pairs for this OS
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1555 a8083063 Iustin Pop
1556 a8083063 Iustin Pop
1557 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # use the call form, not the deprecated "raise Class, arg"
      # statement syntax (kept consistent with the rest of the file)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed, to keep the
    # candidate pool filled after this removal
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    node_info = self.cfg.GetAllNodesInfo().values()
    num_candidates = len([n for n in node_info
                          if n.master_candidate])
    num_nodes = len(node_info)
    random.shuffle(node_info)
    # use a dedicated loop variable; previously this rebound 'node' and
    # shadowed the node being removed
    for mc_node in node_info:
      if num_candidates >= cp_size or num_candidates >= num_nodes:
        break
      if mc_node.master_candidate:
        continue
      mc_node.master_candidate = True
      self.LogInfo("Promoting node %s to master candidate", mc_node.name)
      self.cfg.Update(mc_node)
      self.context.ReaddNode(mc_node)
      num_candidates += 1
1642 eb1742d5 Guido Trotter
1643 a8083063 Iustin Pop
1644 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  # fields answered from live (RPC-collected) node data
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal",
    )

  # fields answered from the configuration alone
  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    )

  def ExpandNames(self):
    # reject output fields which are neither static nor dynamic
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if not self.op.names:
      self.wanted = locking.ALL_SET
    else:
      self.wanted = _GetWantedNodes(self, self.op.names)

    # node locks are needed only when a non-static field was requested
    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # node names were validated by _GetWantedNodes (when any were
    # given), so nothing is left to verify here
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    config_nodes = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      names = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      names = self.wanted
      missing = set(names).difference(config_nodes.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      names = config_nodes.keys()

    names = utils.NiceSort(names)
    nodes = [config_nodes[name] for name in names]

    # begin data gathering

    if self.do_locking:
      live_data = {}
      rpc_results = self.rpc.call_node_info(names, self.cfg.GetVGName(),
                                            self.cfg.GetHypervisorType())
      for name in names:
        nresult = rpc_results[name]
        if nresult.failed or not nresult.data:
          live_data[name] = {}
        else:
          ndata = nresult.data
          conv = utils.TryConvert
          live_data[name] = {
            "mtotal": conv(int, ndata.get('memory_total', None)),
            "mnode": conv(int, ndata.get('memory_dom0', None)),
            "mfree": conv(int, ndata.get('memory_free', None)),
            "dtotal": conv(int, ndata.get('vg_size', None)),
            "dfree": conv(int, ndata.get('vg_free', None)),
            "ctotal": conv(int, ndata.get('cpu_total', None)),
            "bootid": ndata.get('bootid', None),
            }
    else:
      live_data = dict.fromkeys(names, {})

    node_to_primary = dict([(name, set()) for name in names])
    node_to_secondary = dict([(name, set()) for name in names])

    # walk the instance list only if instance-related fields were asked for
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      for instance_name in self.cfg.GetInstanceList():
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodes:
      row = []
      for field in self.op.output_fields:
        if field == "name":
          value = node.name
        elif field == "pinst_list":
          value = list(node_to_primary[node.name])
        elif field == "sinst_list":
          value = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          value = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          value = len(node_to_secondary[node.name])
        elif field == "pip":
          value = node.primary_ip
        elif field == "sip":
          value = node.secondary_ip
        elif field == "tags":
          value = list(node.GetTags())
        elif field == "serial_no":
          value = node.serial_no
        elif field == "master_candidate":
          value = node.master_candidate
        elif field == "master":
          value = node.name == master_node
        elif self._FIELDS_DYNAMIC.Matches(field):
          value = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        row.append(value)
      output.append(row)

    return output
1791 a8083063 Iustin Pop
1792 a8083063 Iustin Pop
1793 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    # validate requested output fields against the known field sets
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    node_names = self.nodes
    volumes = self.rpc.call_node_volumes(node_names)

    instances = [self.cfg.GetInstanceInfo(iname) for iname
                 in self.cfg.GetInstanceList()]

    # per-instance map of logical volumes, grouped by node
    inst_lvs = dict([(inst, inst.MapLVsByNode()) for inst in instances])

    output = []
    for node in node_names:
      # skip nodes which returned no usable volume data
      if node not in volumes or volumes[node].failed or not volumes[node].data:
        continue

      vols = sorted(volumes[node].data, key=lambda vol: vol['dev'])

      for vol in vols:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            value = node
          elif field == "phys":
            value = vol['dev']
          elif field == "vg":
            value = vol['vg']
          elif field == "name":
            value = vol['name']
          elif field == "size":
            value = int(float(vol['size']))
          elif field == "instance":
            # find the first instance owning this volume on this node,
            # or '-' when no instance claims it
            value = '-'
            for inst in instances:
              lvs = inst_lvs[inst]
              if node in lvs and vol['name'] in lvs[node]:
                value = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(value))

        output.append(row)

    return output
1872 dcb93971 Michael Hanselmann
1873 dcb93971 Michael Hanselmann
1874 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # on readd the node must keep its previous addresses
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # the new node becomes a master candidate only if the pool is not full
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    node_info = self.cfg.GetAllNodesInfo().values()
    num_candidates = len([n for n in node_info
                          if n.master_candidate])
    master_candidate = num_candidates < cp_size

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip,
                                 master_candidate=master_candidate,
                                 offline=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for keyfile in keyfiles:
      f = open(keyfile, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    if result.failed or not result.data:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        for failed in result[verifier].data['nodelist']:
          # fixed: the failure details live under .data, like the check
          # two lines above; indexing the result wrapper directly was wrong
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      # fixed: test the payload (.data) like all other RPC checks here;
      # "not result[node]" tested the result wrapper and never fired
      if result[node].failed or not result[node].data:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
2083 a8083063 Iustin Pop
2084 a8083063 Iustin Pop
2085 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  Currently only the master_candidate flag can be changed; the new
  value is written to the cluster configuration and, unless the target
  is the master node itself, the node is re-added to the job queue
  context so the queue is propagated or cleaned up.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    """Syntactic validation of the opcode arguments."""
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    if not hasattr(self.op, 'master_candidate'):
      raise errors.OpPrereqError("Please pass at least one modification")
    # normalize to a strict boolean for the comparisons below
    self.op.master_candidate = bool(self.op.master_candidate)

  def ExpandNames(self):
    # only the node being modified needs to be locked
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # NOTE(review): self.op.force is read here although "force" is not
    # listed in _OP_REQP -- presumably the opcode always carries it;
    # verify against the opcode definition
    force = self.force = self.op.force

    if self.op.master_candidate == False:
      # demoting a candidate: the master node itself must stay a
      # candidate...
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate")
      # ...and the demotion must not drop the number of candidates
      # below the configured pool size (unless forced)
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      node_info = self.cfg.GetAllNodesInfo().values()
      num_candidates = len([node for node in node_info
                            if node.master_candidate])
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    @return: list of (parameter, new value string) pairs for the
        modifications that were applied

    """
    node = self.cfg.GetNodeInfo(self.op.node_name)

    result = []

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      result.append(("master_candidate", str(self.op.master_candidate)))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if self.op.node_name != self.cfg.GetMasterNode():
      self.context.ReaddNode(node)

    return result
2165 b31c8676 Iustin Pop
2166 b31c8676 Iustin Pop
2167 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # read-only query, no locks needed
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    @return: dictionary with software version data and the live
        cluster configuration (name, master, hypervisor settings, ...)

    """
    cluster = self.cfg.GetClusterInfo()
    # static version/platform information
    info = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      }
    # data taken from the live cluster configuration
    info.update({
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": cluster.hvparams,
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      })

    return info
2205 a8083063 Iustin Pop
2206 a8083063 Iustin Pop
2207 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    # no locks needed; just validate the requested output fields
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    # map each supported field to a callable producing its value
    getters = {
      "cluster_name": self.cfg.GetClusterName,
      "master_node": self.cfg.GetMasterNode,
      "drain_flag": lambda: os.path.exists(constants.JOB_QUEUE_DRAIN_FILE),
      }
    values = []
    for field in self.op.output_fields:
      try:
        getter = getters[field]
      except KeyError:
        raise errors.ParameterError(field)
      values.append(getter())
    return values
2245 a8083063 Iustin Pop
2246 a8083063 Iustin Pop
2247 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance now; the node locks are computed in
    # DeclareLocks once the instance lock is held
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if ok:
      return disks_info
    raise errors.OpExecError("Cannot activate block devices")
2282 a8083063 Iustin Pop
2283 a8083063 Iustin Pop
2284 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      # NOTE(review): other call sites test result.data rather than the
      # result object itself -- verify which check is intended here
      if result.failed or not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1)",
                           inst_disk.iv_name, node)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if result.failed or not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2)",
                           inst_disk.iv_name, node)
        disks_ok = False
    # NOTE(review): "result" is whatever the inner loop last produced
    # (normally the primary node's assemble result) and is appended
    # even when that assemble failed -- confirm callers cope with this
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2348 a8083063 Iustin Pop
2349 a8083063 Iustin Pop
2350 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  Assembles all block devices of the instance; on failure the devices
  are torn down again and an OpExecError is raised.

  """
  assembled, _ = _AssembleInstanceDisks(lu, instance,
                                        ignore_secondaries=force)
  if assembled:
    return
  # clean up whatever was partially assembled before bailing out
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.proc.LogWarning("", hint="If the message above refers to a"
                       " secondary node,"
                       " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2363 fe7b0351 Michael Hanselmann
2364 fe7b0351 Michael Hanselmann
2365 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # take the instance lock; the node locks will be filled in by
    # DeclareLocks once the instance is known
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    # refuses to act if the instance is still running on its primary
    _SafeShutdownInstanceDisks(self, self.instance)
2397 a8083063 Iustin Pop
2398 a8083063 Iustin Pop
2399 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  pnode = instance.primary_node
  running = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  if running.failed or not isinstance(running.data, list):
    raise errors.OpExecError("Can't contact node '%s'" % pnode)

  if instance.name in running.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2418 a8083063 Iustin Pop
2419 a8083063 Iustin Pop
2420 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2421 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2422 a8083063 Iustin Pop

2423 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2424 a8083063 Iustin Pop

2425 a8083063 Iustin Pop
  If the ignore_primary is false, errors on the primary node are
2426 a8083063 Iustin Pop
  ignored.
2427 a8083063 Iustin Pop

2428 a8083063 Iustin Pop
  """
2429 a8083063 Iustin Pop
  result = True
2430 a8083063 Iustin Pop
  for disk in instance.disks:
2431 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2432 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
2433 781de953 Iustin Pop
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
2434 781de953 Iustin Pop
      if result.failed or not result.data:
2435 9a4f63d1 Iustin Pop
        logging.error("Could not shutdown block device %s on node %s",
2436 9a4f63d1 Iustin Pop
                      disk.iv_name, node)
2437 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2438 a8083063 Iustin Pop
          result = False
2439 a8083063 Iustin Pop
  return result
2440 a8083063 Iustin Pop
2441 a8083063 Iustin Pop
2442 b9bddb6b Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
2443 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2444 d4f16fd9 Iustin Pop

2445 d4f16fd9 Iustin Pop
  This function check if a given node has the needed amount of free
2446 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2447 d4f16fd9 Iustin Pop
  information from the node, this function raise an OpPrereqError
2448 d4f16fd9 Iustin Pop
  exception.
2449 d4f16fd9 Iustin Pop

2450 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
2451 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2452 e69d05fd Iustin Pop
  @type node: C{str}
2453 e69d05fd Iustin Pop
  @param node: the node to check
2454 e69d05fd Iustin Pop
  @type reason: C{str}
2455 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2456 e69d05fd Iustin Pop
  @type requested: C{int}
2457 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2458 e69d05fd Iustin Pop
  @type hypervisor: C{str}
2459 e69d05fd Iustin Pop
  @param hypervisor: the hypervisor to ask for memory stats
2460 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2461 e69d05fd Iustin Pop
      we cannot check the node
2462 d4f16fd9 Iustin Pop

2463 d4f16fd9 Iustin Pop
  """
2464 72737a7f Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
2465 781de953 Iustin Pop
  nodeinfo[node].Raise()
2466 781de953 Iustin Pop
  free_mem = nodeinfo[node].data.get('memory_free')
2467 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2468 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2469 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2470 d4f16fd9 Iustin Pop
  if requested > free_mem:
2471 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2472 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2473 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2474 d4f16fd9 Iustin Pop
2475 d4f16fd9 Iustin Pop
2476 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    # only the instance lock is needed; disk assembly locking is
    # handled by the called helpers
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # filled-in backend parameters, needed for the memory check below
    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridge existence
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    # extra_args is optional on the opcode, defaulting to empty
    extra_args = getattr(self.op, "extra_args", "")

    # NOTE(review): the instance is marked up in the configuration
    # before the actual start is attempted -- presumably so the config
    # reflects the desired state even if the start fails; confirm
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance, extra_args)
    if result.failed or not result.data:
      # starting failed: tear the disks down again before erroring out
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance")
2538 a8083063 Iustin Pop
2539 a8083063 Iustin Pop
2540 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    # validate the reboot type before acquiring any locks
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    # extra_args is optional on the opcode, defaulting to empty
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboots are handled by a single RPC on the primary
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type, extra_args)
      if result.failed or not result.data:
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: stop the instance, cycle its disks, start it again
      # NOTE(review): this truth-tests the RPC result object directly,
      # unlike the .failed/.data checks used elsewhere -- verify this
      # detects failures as intended
      if not self.rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, extra_args)
      if result.failed or not result.data:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2614 bf6929a2 Alexander Schreiber
2615 bf6929a2 Alexander Schreiber
2616 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  Marks the instance as administratively down, asks its primary node to
  stop it and then deactivates its disks.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    # hooks run on the master plus every node of the instance
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    inst = self.instance
    pnode = inst.primary_node
    # the new state is recorded in the configuration before the node is
    # contacted; a failed RPC is only warned about, not fatal
    self.cfg.MarkInstanceDown(inst.name)
    result = self.rpc.call_instance_shutdown(pnode, inst)
    if result.failed or not result.data:
      self.proc.LogWarning("Could not shutdown instance")

    _ShutdownInstanceDisks(self, inst)
2661 a8083063 Iustin Pop
2662 a8083063 Iustin Pop
2663 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  Re-runs the OS create scripts on an existing, stopped instance,
  optionally switching it to a different OS first.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the primary node: the configuration may say "down"
    # while the hypervisor still reports the instance
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info.failed or remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    # os_type is optional on the opcode
    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # the reinstall opcode has no "pnode" attribute, so the node name
        # for the message must come from the instance itself
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # the disks must be deactivated even if the OS scripts failed
      _ShutdownInstanceDisks(self, inst)
2748 fe7b0351 Michael Hanselmann
2749 fe7b0351 Michael Hanselmann
2750 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  # NOTE(review): unlike the neighbouring instance LUs, this one defines no
  # ExpandNames/REQ_BGL; the comment in Exec ("while we hold the BGL")
  # indicates it is expected to run under the big ganeti lock.
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and is not yet taken (name or IP).

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the primary node that the instance is really stopped
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    # normalize the new name to its fully-qualified form
    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    The configuration is renamed first; every later failure (storage
    directory move, OS rename script) therefore reports "but the instance
    has been renamed in Ganeti".

    """
    inst = self.instance
    old_name = inst.name

    # file-based storage: remember the old directory before the config rename
    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      # result.data[0] carries the per-directory success flag
      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      if result.failed or not result.data:
        # a failed rename script is only a warning, not fatal
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
2857 decd5f45 Iustin Pop
2858 decd5f45 Iustin Pop
2859 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  Shuts the instance down, removes its disks and finally deletes it from
  the cluster configuration; "ignore_failures" downgrades the first two
  steps' errors to warnings.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node only.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    # step 1: stop the instance on its primary node
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logging.info("Removing block devices for instance %s", instance.name)

    # step 2: remove the block devices
    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    # step 3: drop the instance from the configuration and schedule its
    # lock for removal
    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2925 a8083063 Iustin Pop
2926 a8083063 Iustin Pop
2927 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  # NOTE(review): "admin_ram" and "vcpus" are accepted here but have no
  # matching branch in Exec below, so requesting them reaches the final
  # "raise errors.ParameterError(field)" — verify whether they should be
  # handled (the be/* fields cover memory/vcpus via beparams).
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state", "admin_ram",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    "(disk).(size)/([0-9]+)",
                                    "(disk).(sizes)",
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
                                    "(nic).(macs|ips|bridges)",
                                    "(disk|nic).(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  # fields that require querying the nodes (live data)
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")


  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locks are only needed when at least one non-static field is requested
    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list (one entry per instance) of lists of field values, in
    the order the fields were requested.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.do_locking:
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
    elif self.wanted != locking.ALL_SET:
      instance_names = self.wanted
      # without locking, requested instances may have disappeared meanwhile
      missing = set(instance_names).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some instances were removed before retrieving their data: %s"
          % missing)
    else:
      instance_names = all_info.keys()

    instance_names = utils.NiceSort(instance_names)
    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    # live data is only gathered when we hold locks (dynamic fields wanted)
    bad_nodes = []
    if self.do_locking:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
            # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      # hypervisor/backend parameters with cluster defaults filled in
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        # st_match carries regex groups for the parametrized disk/nic fields
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = (instance.status != "down")
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin state + live state
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # legacy names: sda/sdb map to disk index 0/1
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
3147 a8083063 Iustin Pop
3148 a8083063 Iustin Pop
3149 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance now; the node locks start out empty and are
    # recalculated (replaced) once the instance's nodes are known
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    # hooks run on the master node plus all of the instance's secondaries
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # failover only makes sense for network-mirrored disk templates
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence on the failover target
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # a degraded target disk aborts the failover only when the
        # instance is up and the user did not ask to ignore consistency
        if instance.status == "up" and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    if result.failed or not result.data:
      # with ignore_consistency, a failed shutdown is downgraded to a
      # warning so that failing over from a dead node stays possible
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding"
                             " anyway. Please make sure node %s is down",
                             instance.name, source_node, source_node)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # the secondary becomes the new primary
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status == "up":
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None)
      if result.failed or not result.data:
        # starting failed: deactivate the disks we just assembled
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s." %
                                 (instance.name, target_node))
3276 a8083063 Iustin Pop
3277 a8083063 Iustin Pop
3278 b9bddb6b Iustin Pop
def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
3279 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
3280 a8083063 Iustin Pop

3281 a8083063 Iustin Pop
  This always creates all devices.
3282 a8083063 Iustin Pop

3283 a8083063 Iustin Pop
  """
3284 a8083063 Iustin Pop
  if device.children:
3285 a8083063 Iustin Pop
    for child in device.children:
3286 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
3287 a8083063 Iustin Pop
        return False
3288 a8083063 Iustin Pop
3289 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3290 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3291 72737a7f Iustin Pop
                                       instance.name, True, info)
3292 781de953 Iustin Pop
  if new_id.failed or not new_id.data:
3293 a8083063 Iustin Pop
    return False
3294 a8083063 Iustin Pop
  if device.physical_id is None:
3295 a8083063 Iustin Pop
    device.physical_id = new_id
3296 a8083063 Iustin Pop
  return True
3297 a8083063 Iustin Pop
3298 a8083063 Iustin Pop
3299 b9bddb6b Iustin Pop
def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
3300 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
3301 a8083063 Iustin Pop

3302 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
3303 a8083063 Iustin Pop
  all its children.
3304 a8083063 Iustin Pop

3305 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
3306 a8083063 Iustin Pop

3307 a8083063 Iustin Pop
  """
3308 a8083063 Iustin Pop
  if device.CreateOnSecondary():
3309 a8083063 Iustin Pop
    force = True
3310 a8083063 Iustin Pop
  if device.children:
3311 a8083063 Iustin Pop
    for child in device.children:
3312 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(lu, node, instance,
3313 3f78eef2 Iustin Pop
                                        child, force, info):
3314 a8083063 Iustin Pop
        return False
3315 a8083063 Iustin Pop
3316 a8083063 Iustin Pop
  if not force:
3317 a8083063 Iustin Pop
    return True
3318 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3319 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3320 72737a7f Iustin Pop
                                       instance.name, False, info)
3321 781de953 Iustin Pop
  if new_id.failed or not new_id.data:
3322 a8083063 Iustin Pop
    return False
3323 a8083063 Iustin Pop
  if device.physical_id is None:
3324 a8083063 Iustin Pop
    device.physical_id = new_id
3325 a8083063 Iustin Pop
  return True
3326 a8083063 Iustin Pop
3327 a8083063 Iustin Pop
3328 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
3329 923b1523 Iustin Pop
  """Generate a suitable LV name.
3330 923b1523 Iustin Pop

3331 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
3332 923b1523 Iustin Pop

3333 923b1523 Iustin Pop
  """
3334 923b1523 Iustin Pop
  results = []
3335 923b1523 Iustin Pop
  for val in exts:
3336 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
3337 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
3338 923b1523 Iustin Pop
  return results
3339 923b1523 Iustin Pop
3340 923b1523 Iustin Pop
3341 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  # allocate the cluster-wide resources the drbd device needs
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  # backing LVs: the data volume and a fixed-size (128 MB) metadata volume
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port,
                                  p_minor, s_minor,
                                  shared_secret),
                      children=[data_dev, meta_dev],
                      iv_name=iv_name)
3360 a1f445d3 Iustin Pop
3361 7c0d6283 Michael Hanselmann
3362 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  Builds the list of L{objects.Disk} for the instance; the iv_name of
  disk I{i} is always "disk/<base_index + i>".

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    # no block devices at all
    pass
  elif template_name == constants.DT_PLAIN:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    lv_names = _GenerateUniqueNames(lu, [".disk%d" % i
                                         for i in range(disk_count)])
    for idx, params in enumerate(disk_info):
      disks.append(objects.Disk(dev_type=constants.LD_LV,
                                size=params["size"],
                                logical_id=(vgname, lv_names[idx]),
                                iv_name="disk/%d" % (base_index + idx)))
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # two minors per disk: one on the primary, one on the secondary
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * disk_count, instance_name)

    # two LVs per disk: the data volume and its drbd metadata volume
    lv_names = _GenerateUniqueNames(lu,
                                    [".disk%d_%s" % (i, suffix)
                                     for i in range(disk_count)
                                     for suffix in ("data", "meta")
                                     ])
    for idx, params in enumerate(disk_info):
      disks.append(_GenerateDRBD8Branch(lu, primary_node, remote_node,
                                        params["size"],
                                        lv_names[2 * idx:2 * idx + 2],
                                        "disk/%d" % (base_index + idx),
                                        minors[2 * idx],
                                        minors[2 * idx + 1]))
  elif template_name == constants.DT_FILE:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, params in enumerate(disk_info):
      disks.append(objects.Disk(dev_type=constants.LD_FILE,
                                size=params["size"],
                                iv_name="disk/%d" % (base_index + idx),
                                logical_id=(file_driver,
                                            "%s/disk%d" % (file_storage_dir,
                                                           idx))))
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
3423 a8083063 Iustin Pop
3424 a8083063 Iustin Pop
3425 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3426 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3427 3ecf6786 Iustin Pop

3428 3ecf6786 Iustin Pop
  """
3429 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3430 a0c3fea1 Michael Hanselmann
3431 a0c3fea1 Michael Hanselmann
3432 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    # the directory holding the file-based disks has to exist first
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    if result.failed or not result.data:
      logging.error("Could not connect to node '%s'", pnode)
      return False

    if not result.data[0]:
      logging.error("Failed to create directory '%s'", file_storage_dir)
      return False

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for disk in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 disk.iv_name, instance.name)
    #HARDCODE
    for snode in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(lu, snode, instance,
                                        disk, False, info):
        logging.error("Failed to create volume %s (%s) on secondary node %s!",
                      disk.iv_name, disk, snode)
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(lu, pnode,
                                    instance, disk, info):
      logging.error("Failed to create volume %s on primary!", disk.iv_name)
      return False

  return True
3479 a8083063 Iustin Pop
3480 a8083063 Iustin Pop
3481 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  # Keep the success flag separate from the per-call RPC results;
  # previously the same name was reused for both, so the function could
  # return the last RPC result object instead of the accumulated
  # boolean, masking earlier per-disk failures.
  all_result = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      result = lu.rpc.call_blockdev_remove(node, disk)
      if result.failed or not result.data:
        # best-effort: warn and continue with the remaining devices
        lu.proc.LogWarning("Could not remove block device %s on node %s,"
                           " continuing anyway", device.iv_name, node)
        all_result = False

  if instance.disk_template == constants.DT_FILE:
    # also remove the directory that held the file-based disks
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                 file_storage_dir)
    if result.failed or not result.data:
      logging.error("Could not remove directory '%s'", file_storage_dir)
      all_result = False

  return all_result
3518 a8083063 Iustin Pop
3519 a8083063 Iustin Pop
3520 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  # Required free disk space as a function of disk and swap space
  data_size = sum(d["size"] for d in disks)
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: data_size,
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: data_size + 128 * len(disks),
    # file-based disks don't use the volume group
    constants.DT_FILE: None,
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" %  disk_template)

  return req_size_dict[disk_template]
3538 e2fe6369 Iustin Pop
3539 e2fe6369 Iustin Pop
3540 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
3541 74409b12 Iustin Pop
  """Hypervisor parameter validation.
3542 74409b12 Iustin Pop

3543 74409b12 Iustin Pop
  This function abstract the hypervisor parameter validation to be
3544 74409b12 Iustin Pop
  used in both instance create and instance modify.
3545 74409b12 Iustin Pop

3546 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
3547 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
3548 74409b12 Iustin Pop
  @type nodenames: list
3549 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
3550 74409b12 Iustin Pop
  @type hvname: string
3551 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
3552 74409b12 Iustin Pop
  @type hvparams: dict
3553 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
3554 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
3555 74409b12 Iustin Pop

3556 74409b12 Iustin Pop
  """
3557 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
3558 74409b12 Iustin Pop
                                                  hvname,
3559 74409b12 Iustin Pop
                                                  hvparams)
3560 74409b12 Iustin Pop
  for node in nodenames:
3561 781de953 Iustin Pop
    info = hvinfo[node]
3562 781de953 Iustin Pop
    info.Raise()
3563 781de953 Iustin Pop
    if not info.data or not isinstance(info.data, (tuple, list)):
3564 74409b12 Iustin Pop
      raise errors.OpPrereqError("Cannot get current information"
3565 781de953 Iustin Pop
                                 " from node '%s' (%s)" % (node, info.data))
3566 781de953 Iustin Pop
    if not info.data[0]:
3567 74409b12 Iustin Pop
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
3568 781de953 Iustin Pop
                                 " %s" % info.data[1])
3569 74409b12 Iustin Pop
3570 74409b12 Iustin Pop
3571 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  """
  # hooks identification: directory suffix and object type for the
  # instance-add hooks
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present on self.op (presumably
  # enforced by the LogicalUnit machinery before this LU runs);
  # optional attributes are defaulted in ExpandNames
  _OP_REQP = ["instance_name", "disks", "disk_template",
              "mode", "start",
              "wait_for_sync", "ip_check", "nics",
              "hvparams", "beparams"]
  # locking needs are computed per-opcode in ExpandNames, so no
  # blanket lock acquisition is requested up-front
  REQ_BGL = False
3582 7baf741d Guido Trotter
3583 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Expand a (possibly abbreviated) node name and verify it exists.

    """
    expanded = self.cfg.ExpandNodeName(node)
    if expanded is not None:
      return expanded
    raise errors.OpPrereqError("Unknown node %s" % node)
3591 7baf741d Guido Trotter
3592 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    Besides computing locks, this performs all the local (no-RPC)
    validation of the opcode and precomputes self.nics, self.disks,
    self.be_full and self.check_ip for use by CheckPrereq/Exec.

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # no hypervisor requested: fall back to the cluster-level default
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)

    # merge the cluster-level defaults with the opcode's overrides
    # before running the hypervisor's own syntax check
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)

    # fill and remember the beparams dict
    utils.CheckBEParams(self.op.beparams)
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification (also resolves the IP used below)
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        # "auto" means: use the IP the instance name resolves to
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
      # bridge verification
      bridge = nic.get("bridge", self.cfg.GetDefBridge())
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator may pick any node, so we must be able to lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        # no source node given: CheckPrereq will search all (locked) nodes
        # for an export matching the relative src_path
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          # relative paths are resolved under the cluster's export directory
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
3746 a8083063 Iustin Pop
3747 538475ca Iustin Pop
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    Feeds the instance specification to the configured iallocator and,
    on success, stores the chosen primary (and, for two-node layouts,
    secondary) node back into the opcode.

    """
    nic_dicts = [nic.ToDict() for nic in self.nics]
    allocator = IAllocator(self,
                           mode=constants.IALLOCATOR_MODE_ALLOC,
                           name=self.op.instance_name,
                           disk_template=self.op.disk_template,
                           tags=[],
                           os=self.op.os_type,
                           vcpus=self.be_full[constants.BE_VCPUS],
                           mem_size=self.be_full[constants.BE_MEMORY],
                           disks=self.disks,
                           nics=nic_dicts,
                           hypervisor=self.op.hypervisor,
                           )

    allocator.Run(self.op.iallocator)

    if not allocator.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           allocator.info))
    if len(allocator.nodes) != allocator.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(allocator.nodes),
                                  allocator.required_nodes))
    self.op.pnode = allocator.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(allocator.nodes))
    if allocator.required_nodes == 2:
      self.op.snode = allocator.nodes[1]
3782 538475ca Iustin Pop
3783 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    # static, creation-specific variables
    disk_sizes = ",".join(str(d["size"]) for d in self.disks)
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": disk_sizes,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    # import-only variables
    if self.op.mode == constants.INSTANCE_IMPORT:
      env.update({
        "INSTANCE_SRC_NODE": self.op.src_node,
        "INSTANCE_SRC_PATH": self.op.src_path,
        "INSTANCE_SRC_IMAGES": self.src_images,
        })

    # generic per-instance variables
    nic_info = [(n.ip, n.bridge, n.mac) for n in self.nics]
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=nic_info,
    ))

    # hooks run on the master, the primary and all secondaries
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return env, nl, nl
3812 a8083063 Iustin Pop
3813 a8083063 Iustin Pop
3814 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Validates everything that needs cluster state or RPC: export data
    (for imports), IP conflicts, node existence, free disk space,
    hypervisor parameters, the requested OS and bridges, and free
    memory on the primary node.

    @raise errors.OpPrereqError: if any prerequisite is not met

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_node is None:
        # no source node given: search all locked nodes for the export
        exp_list = self.rpc.call_export_list(
          self.acquired_locks[locking.LEVEL_NODE])
        found = False
        for node in exp_list:
          if not exp_list[node].failed and src_path in exp_list[node].data:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                      src_path)

      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise()
      if not result.data:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      # the export info is a ConfigParser-style object
      export_info = result.data
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      # build the list of dump images to restore, one (or False) per disk
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          # only NIC indices present in the export (0..exp_nic_count-1)
          # carry a 'nic%d_mac' option; using '>' instead of '>=' avoids
          # reading a missing option (and an unhandled NoOptionError) for
          # the first NIC beyond the export's count
          if nic.mac == constants.VALUE_AUTO and exp_nic_count > idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise()
        info = info.data
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    # remote validation of the hypervisor parameters on all involved nodes
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise()
    if not isinstance(result.data, objects.OS):
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
                                 " exist on destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'
3975 a8083063 Iustin Pop
3976 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Sequence: generate MACs/network port, create the disks, commit the
    instance to the configuration, release node locks, wait for disk
    sync, run the OS create/import scripts and optionally start the
    instance.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # generate real MACs for NICs that asked for auto-generation
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    # some hypervisor kinds need a cluster-allocated network port
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    # NOTE(review): the trailing 0 presumably is the base disk index —
    # confirm against _GenerateDiskTemplate's signature
    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self, iobj):
      # roll back any partially-created disks before failing
      _RemoveDisks(self, iobj)
      self.cfg.ReleaseDRBDMinors(instance)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Remove the temp. assignements for the instance's drbds
    self.cfg.ReleaseDRBDMinors(instance)
    # Unlock all the nodes
    if self.op.mode == constants.INSTANCE_IMPORT:
      # keep only the source node locked: it is still needed for the
      # OS import below
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo both the disks and the config entry
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
        result.Raise()
        if not result.data:
          raise errors.OpExecError("Could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        import_result.Raise()
        # per-disk import results; failures are warnings, not fatal
        for idx, result in enumerate(import_result.data):
          if not result:
            self.LogWarning("Could not import the image %s for instance"
                            " %s, disk %d, on node %s" %
                            (src_images[idx], instance, idx, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not start instance")
4110 a8083063 Iustin Pop
4111 a8083063 Iustin Pop
4112 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # make sure the instance is actually running on its primary node
    running = self.rpc.call_instance_list([pnode], [inst.hypervisor])[pnode]
    running.Raise()
    if inst.name not in running.data:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logging.debug("Connecting to console of %s on %s", inst.name, pnode)

    hyper = hypervisor.GetHypervisor(inst.hypervisor)
    console_cmd = hyper.GetShellCommandForConsole(inst)

    # build ssh cmdline
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
4157 a8083063 Iustin Pop
4158 a8083063 Iustin Pop
4159 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
4160 a8083063 Iustin Pop
  """Replace the disks of an instance.
4161 a8083063 Iustin Pop

4162 a8083063 Iustin Pop
  """
4163 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
4164 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4165 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
4166 efd990e4 Guido Trotter
  REQ_BGL = False
4167 efd990e4 Guido Trotter
4168 efd990e4 Guido Trotter
  def ExpandNames(self):
    """Expand and lock the instance and the nodes involved in the replace."""
    self._ExpandAndLockInstance()

    # normalize the optional opcode attribute
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None

    if getattr(self.op, "iallocator", None) is not None:
      if self.op.remote_node is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
      # the allocator picks the new secondary at runtime, so we must
      # hold all node locks
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      new_sec = self.cfg.ExpandNodeName(self.op.remote_node)
      if new_sec is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = new_sec
      # lock the explicit new secondary, plus the instance's own nodes
      self.needed_locks[locking.LEVEL_NODE] = [new_sec]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      # no new node: only the instance's own nodes are needed
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4191 efd990e4 Guido Trotter
4192 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Declare the instance's node locks at the node level."""
    if level != locking.LEVEL_NODE:
      return
    # If we're not already locking all nodes in the set we have to declare
    # the instance's primary/secondary nodes.
    if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
      self._LockInstancesNodes()
4198 a8083063 Iustin Pop
4199 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    On success the chosen node is stored in self.op.remote_node.
    Raises errors.OpPrereqError if the allocator fails or returns an
    unexpected number of nodes.

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # FIX: the format string has three placeholders (allocator name,
      # actual count, required count) but the original passed only two
      # arguments, turning this error path into a TypeError
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)
4221 b6e82a65 Iustin Pop
4222 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    hook_env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    # the generic per-instance variables are layered on top
    hook_env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    run_nodes = [self.cfg.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      run_nodes.append(self.op.remote_node)

    # same node list for pre- and post-hooks
    return hook_env, run_nodes, run_nodes
4241 a8083063 Iustin Pop
4242 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # only network-mirrored disk templates support disk replacement
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # when an iallocator is given, it computes and stores the new
    # secondary in self.op.remote_node before the checks below run
    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # primary replacement: target is the primary, the secondary is
        # the "other" (consistency-checked) node
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # an empty disk list means "replace all of the instance's disks"
    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    # validate each requested disk index; FindDisk is expected to
    # reject unknown indices -- presumably by raising, verify in objects.py
    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)
4314 a8083063 Iustin Pop
4315 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking disk/%d on %s" % (idx, node))
        cfg.SetDiskID(dev, node)
        # FIX: test the RPC result's failed/data fields like every other
        # check in this LU; the raw result object is always truthy, so
        # the original "if not ..." check could never trigger
        result = self.rpc.call_blockdev_find(node, dev)
        if result.failed or not result.data:
          raise errors.OpExecError("Can't find disk/%d on node %s" %
                                   (idx, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node == instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".disk%d_%s" % (idx, suf)
                  for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # 128 is the (fixed) size of the DRBD meta-data LV
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if not find_res.failed and find_res.data is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # update the in-memory config so the new LVs carry the old names
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
      if result.failed or not result.data:
        # best-effort rollback: try to remove the LVs we just created
        for new_lv in new_lvs:
          result = self.rpc.call_blockdev_remove(tgt_node, new_lv)
          if result.failed or not result.data:
            # NOTE(review): the "%s" placeholder is never filled in --
            # verify LogWarning's argument handling
            warning("Can't rollback device %s", hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
      # data[5] is presumably the is_degraded flag -- verify against the
      # blockdev_find RPC return format
      if result.failed or result.data[5]:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        result = self.rpc.call_blockdev_remove(tgt_node, lv)
        if result.failed or not result.data:
          # removal is best-effort; leftovers are only a space leak
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
4499 a9e0c397 Iustin Pop
4500 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
4501 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
4502 a9e0c397 Iustin Pop

4503 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4504 a9e0c397 Iustin Pop
      - for all disks of the instance:
4505 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
4506 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
4507 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
4508 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
4509 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
4510 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
4511 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
4512 a9e0c397 Iustin Pop
          not network enabled
4513 a9e0c397 Iustin Pop
      - wait for sync across all devices
4514 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
4515 a9e0c397 Iustin Pop

4516 a9e0c397 Iustin Pop
    Failures are not very well handled.
4517 0834c866 Iustin Pop

4518 a9e0c397 Iustin Pop
    """
4519 0834c866 Iustin Pop
    steps_total = 6
4520 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4521 a9e0c397 Iustin Pop
    instance = self.instance
4522 a9e0c397 Iustin Pop
    iv_names = {}
4523 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4524 a9e0c397 Iustin Pop
    # start of work
4525 a9e0c397 Iustin Pop
    cfg = self.cfg
4526 a9e0c397 Iustin Pop
    old_node = self.tgt_node
4527 a9e0c397 Iustin Pop
    new_node = self.new_node
4528 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
4529 0834c866 Iustin Pop
4530 0834c866 Iustin Pop
    # Step: check device activation
4531 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4532 0834c866 Iustin Pop
    info("checking volume groups")
4533 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
4534 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
4535 0834c866 Iustin Pop
    for node in pri_node, new_node:
4536 781de953 Iustin Pop
      res = results[node]
4537 781de953 Iustin Pop
      if res.failed or not res.data or my_vg not in res.data:
4538 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4539 0834c866 Iustin Pop
                                 (my_vg, node))
4540 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4541 d418ebfb Iustin Pop
      if idx not in self.op.disks:
4542 0834c866 Iustin Pop
        continue
4543 d418ebfb Iustin Pop
      info("checking disk/%d on %s" % (idx, pri_node))
4544 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4545 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
4546 781de953 Iustin Pop
      result.Raise()
4547 781de953 Iustin Pop
      if not result.data:
4548 d418ebfb Iustin Pop
        raise errors.OpExecError("Can't find disk/%d on node %s" %
4549 d418ebfb Iustin Pop
                                 (idx, pri_node))
4550 0834c866 Iustin Pop
4551 0834c866 Iustin Pop
    # Step: check other node consistency
4552 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4553 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4554 d418ebfb Iustin Pop
      if idx not in self.op.disks:
4555 0834c866 Iustin Pop
        continue
4556 d418ebfb Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, pri_node))
4557 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4558 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4559 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4560 0834c866 Iustin Pop
                                 pri_node)
4561 0834c866 Iustin Pop
4562 0834c866 Iustin Pop
    # Step: create new storage
4563 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4564 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4565 a9e0c397 Iustin Pop
      size = dev.size
4566 d418ebfb Iustin Pop
      info("adding new local storage on %s for disk/%d" %
4567 d418ebfb Iustin Pop
           (new_node, idx))
4568 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4569 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4570 a9e0c397 Iustin Pop
      # are talking about the secondary node
4571 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4572 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
4573 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4574 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4575 a9e0c397 Iustin Pop
                                   " node '%s'" %
4576 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4577 a9e0c397 Iustin Pop
4578 468b46f9 Iustin Pop
    # Step 4: dbrd minors and drbd setups changes
4579 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
4580 a1578d63 Iustin Pop
    # error and the success paths
4581 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4582 a1578d63 Iustin Pop
                                   instance.name)
4583 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
4584 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4585 d418ebfb Iustin Pop
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
4586 0834c866 Iustin Pop
      size = dev.size
4587 d418ebfb Iustin Pop
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
4588 a9e0c397 Iustin Pop
      # create new devices on new_node
4589 ffa1c0dc Iustin Pop
      if pri_node == dev.logical_id[0]:
4590 ffa1c0dc Iustin Pop
        new_logical_id = (pri_node, new_node,
4591 f9518d38 Iustin Pop
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4592 f9518d38 Iustin Pop
                          dev.logical_id[5])
4593 ffa1c0dc Iustin Pop
      else:
4594 ffa1c0dc Iustin Pop
        new_logical_id = (new_node, pri_node,
4595 f9518d38 Iustin Pop
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4596 f9518d38 Iustin Pop
                          dev.logical_id[5])
4597 d418ebfb Iustin Pop
      iv_names[idx] = (dev, dev.children, new_logical_id)
4598 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4599 a1578d63 Iustin Pop
                    new_logical_id)
4600 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4601 ffa1c0dc Iustin Pop
                              logical_id=new_logical_id,
4602 a9e0c397 Iustin Pop
                              children=dev.children)
4603 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(self, new_node, instance,
4604 3f78eef2 Iustin Pop
                                        new_drbd, False,
4605 b9bddb6b Iustin Pop
                                        _GetInstanceInfoText(instance)):
4606 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4607 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4608 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4609 a9e0c397 Iustin Pop
4610 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4611 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4612 d418ebfb Iustin Pop
      info("shutting down drbd for disk/%d on old node" % idx)
4613 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4614 781de953 Iustin Pop
      result = self.rpc.call_blockdev_shutdown(old_node, dev)
4615 781de953 Iustin Pop
      if result.failed or not result.data:
4616 d418ebfb Iustin Pop
        warning("Failed to shutdown drbd for disk/%d on old node" % idx,
4617 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4618 a9e0c397 Iustin Pop
4619 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4620 642445d9 Iustin Pop
    done = 0
4621 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4622 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4623 f9518d38 Iustin Pop
      # set the network part of the physical (unique in bdev terms) id
4624 f9518d38 Iustin Pop
      # to None, meaning detach from network
4625 f9518d38 Iustin Pop
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
4626 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4627 642445d9 Iustin Pop
      # standalone state
4628 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
4629 781de953 Iustin Pop
      if not result.failed and result.data:
4630 642445d9 Iustin Pop
        done += 1
4631 642445d9 Iustin Pop
      else:
4632 d418ebfb Iustin Pop
        warning("Failed to detach drbd disk/%d from network, unusual case" %
4633 d418ebfb Iustin Pop
                idx)
4634 642445d9 Iustin Pop
4635 642445d9 Iustin Pop
    if not done:
4636 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4637 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
4638 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4639 642445d9 Iustin Pop
4640 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4641 642445d9 Iustin Pop
    # the instance to point to the new secondary
4642 642445d9 Iustin Pop
    info("updating instance configuration")
4643 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
4644 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
4645 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4646 642445d9 Iustin Pop
    cfg.Update(instance)
4647 a1578d63 Iustin Pop
    # we can remove now the temp minors as now the new values are
4648 a1578d63 Iustin Pop
    # written to the config file (and therefore stable)
4649 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance.name)
4650 a9e0c397 Iustin Pop
4651 642445d9 Iustin Pop
    # and now perform the drbd attach
4652 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4653 642445d9 Iustin Pop
    failures = []
4654 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4655 d418ebfb Iustin Pop
      info("attaching primary drbd for disk/%d to new secondary node" % idx)
4656 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4657 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4658 642445d9 Iustin Pop
      # is correct
4659 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4660 ffa1c0dc Iustin Pop
      logging.debug("Disk to attach: %s", dev)
4661 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
4662 781de953 Iustin Pop
      if result.failed or not result.data:
4663 d418ebfb Iustin Pop
        warning("can't attach drbd disk/%d to new secondary!" % idx,
4664 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4665 a9e0c397 Iustin Pop
4666 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4667 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4668 a9e0c397 Iustin Pop
    # return value
4669 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4670 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4671 a9e0c397 Iustin Pop
4672 a9e0c397 Iustin Pop
    # so check manually all the devices
4673 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
4674 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4675 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
4676 781de953 Iustin Pop
      result.Raise()
4677 781de953 Iustin Pop
      if result.data[5]:
4678 d418ebfb Iustin Pop
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
4679 a9e0c397 Iustin Pop
4680 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4681 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
4682 d418ebfb Iustin Pop
      info("remove logical volumes for disk/%d" % idx)
4683 a9e0c397 Iustin Pop
      for lv in old_lvs:
4684 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4685 781de953 Iustin Pop
        result = self.rpc.call_blockdev_remove(old_node, lv)
4686 781de953 Iustin Pop
        if result.failed or not result.data:
4687 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4688 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4689 a9e0c397 Iustin Pop
4690 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    instance = self.instance

    # Activate the instance disks if we're replacing them on a down instance
    if instance.status == "down":
      _StartInstanceDisks(self, instance, True)

    # only DRBD8-based instances support disk replacement; choose the
    # handler depending on whether a new secondary node was requested
    if instance.disk_template != constants.DT_DRBD8:
      raise errors.ProgrammerError("Unhandled disk replacement case")
    if self.op.remote_node is None:
      handler = self._ExecD8DiskOnly
    else:
      handler = self._ExecD8Secondary

    ret = handler(feedback_fn)

    # Deactivate the instance disks if we're replacing them on a down instance
    if instance.status == "down":
      _SafeShutdownInstanceDisks(self, instance)

    return ret
4717 a9e0c397 Iustin Pop
4718 a8083063 Iustin Pop
4719 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the instance; node locks are filled in later."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the instance's nodes once the node locking level is reached."""
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {"DISK": self.op.disk, "AMOUNT": self.op.amount}
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    inst = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert inst is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.instance = inst

    # only LVM-backed templates (plain/drbd) can be grown
    if inst.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = inst.FindDisk(self.op.disk)

    # every node holding a copy of the disk needs enough free VG space
    nodenames = [inst.primary_node] + list(inst.secondary_nodes)
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       inst.hypervisor)
    for node in nodenames:
      info = nodeinfo[node]
      if info.failed or not info.data:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.data.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    inst = self.instance
    disk = self.disk
    # grow on secondaries first, the primary last
    for node in (inst.secondary_nodes + (inst.primary_node,)):
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      result.Raise()
      data = result.data
      # the RPC payload must be a (success, message) pair
      if not data or not isinstance(data, (list, tuple)) or len(data) != 2:
        raise errors.OpExecError("Grow request failed to node %s" % node)
      if not data[0]:
        raise errors.OpExecError("Grow request failed to node %s: %s" %
                                 (node, data[1]))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(inst)
    if not self.op.wait_for_sync:
      return
    if not _WaitForSync(self, inst):
      self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                           " status.\nPlease check the instance.")
4812 8729e0d7 Iustin Pop
4813 8729e0d7 Iustin Pop
4814 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the instance list and the locks needed.

    All locks are taken in shared mode, since this LU only reads data.

    """
    self.needed_locks = {}
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          # report the name being expanded; this opcode has no
          # 'instance_name' field, so using self.op.instance_name here
          # would raise AttributeError instead of the intended error
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # an empty list means "all instances"
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the instances' nodes once the node level is reached."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" was requested: use whatever we ended up locking
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing the device (and, recursively, its
    children); when self.op.static is set no RPCs are made and the
    pstatus/sstatus entries are None.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
      dev_pstatus.Raise()
      dev_pstatus = dev_pstatus.data
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
      dev_sstatus.Raise()
      dev_sstatus = dev_sstatus.data
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        # live query: ask the primary node whether the instance runs
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise()
        remote_info = remote_info.data
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
4955 a8083063 Iustin Pop
4956 a8083063 Iustin Pop
4957 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4958 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4959 a8083063 Iustin Pop

4960 a8083063 Iustin Pop
  """
4961 a8083063 Iustin Pop
  HPATH = "instance-modify"
4962 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4963 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
4964 1a5c7281 Guido Trotter
  REQ_BGL = False
4965 1a5c7281 Guido Trotter
4966 24991749 Iustin Pop
  def CheckArguments(self):
4967 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
4968 24991749 Iustin Pop
      self.op.nics = []
4969 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
4970 24991749 Iustin Pop
      self.op.disks = []
4971 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
4972 24991749 Iustin Pop
      self.op.beparams = {}
4973 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
4974 24991749 Iustin Pop
      self.op.hvparams = {}
4975 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
4976 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
4977 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
4978 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4979 24991749 Iustin Pop
4980 d4b72030 Guido Trotter
    utils.CheckBEParams(self.op.beparams)
4981 d4b72030 Guido Trotter
4982 24991749 Iustin Pop
    # Disk validation
4983 24991749 Iustin Pop
    disk_addremove = 0
4984 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
4985 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
4986 24991749 Iustin Pop
        disk_addremove += 1
4987 24991749 Iustin Pop
        continue
4988 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
4989 24991749 Iustin Pop
        disk_addremove += 1
4990 24991749 Iustin Pop
      else:
4991 24991749 Iustin Pop
        if not isinstance(disk_op, int):
4992 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
4993 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
4994 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
4995 24991749 Iustin Pop
        if mode not in (constants.DISK_RDONLY, constants.DISK_RDWR):
4996 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
4997 24991749 Iustin Pop
        size = disk_dict.get('size', None)
4998 24991749 Iustin Pop
        if size is None:
4999 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
5000 24991749 Iustin Pop
        try:
5001 24991749 Iustin Pop
          size = int(size)
5002 24991749 Iustin Pop
        except ValueError, err:
5003 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
5004 24991749 Iustin Pop
                                     str(err))
5005 24991749 Iustin Pop
        disk_dict['size'] = size
5006 24991749 Iustin Pop
      else:
5007 24991749 Iustin Pop
        # modification of disk
5008 24991749 Iustin Pop
        if 'size' in disk_dict:
5009 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
5010 24991749 Iustin Pop
                                     " grow-disk")
5011 24991749 Iustin Pop
5012 24991749 Iustin Pop
    if disk_addremove > 1:
5013 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
5014 24991749 Iustin Pop
                                 " supported at a time")
5015 24991749 Iustin Pop
5016 24991749 Iustin Pop
    # NIC validation
5017 24991749 Iustin Pop
    nic_addremove = 0
5018 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5019 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5020 24991749 Iustin Pop
        nic_addremove += 1
5021 24991749 Iustin Pop
        continue
5022 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
5023 24991749 Iustin Pop
        nic_addremove += 1
5024 24991749 Iustin Pop
      else:
5025 24991749 Iustin Pop
        if not isinstance(nic_op, int):
5026 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
5027 24991749 Iustin Pop
5028 24991749 Iustin Pop
      # nic_dict should be a dict
5029 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
5030 24991749 Iustin Pop
      if nic_ip is not None:
5031 24991749 Iustin Pop
        if nic_ip.lower() == "none":
5032 24991749 Iustin Pop
          nic_dict['ip'] = None
5033 24991749 Iustin Pop
        else:
5034 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
5035 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
5036 24991749 Iustin Pop
      # we can only check None bridges and assign the default one
5037 24991749 Iustin Pop
      nic_bridge = nic_dict.get('bridge', None)
5038 24991749 Iustin Pop
      if nic_bridge is None:
5039 24991749 Iustin Pop
        nic_dict['bridge'] = self.cfg.GetDefBridge()
5040 24991749 Iustin Pop
      # but we can validate MACs
5041 24991749 Iustin Pop
      nic_mac = nic_dict.get('mac', None)
5042 24991749 Iustin Pop
      if nic_mac is not None:
5043 24991749 Iustin Pop
        if self.cfg.IsMacInUse(nic_mac):
5044 24991749 Iustin Pop
          raise errors.OpPrereqError("MAC address %s already in use"
5045 24991749 Iustin Pop
                                     " in cluster" % nic_mac)
5046 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5047 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
5048 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
5049 24991749 Iustin Pop
    if nic_addremove > 1:
5050 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
5051 24991749 Iustin Pop
                                 " supported at a time")
5052 24991749 Iustin Pop
5053 1a5c7281 Guido Trotter
  def ExpandNames(self):
    """Expand the instance name and prepare the locking skeleton."""
    self._ExpandAndLockInstance()
    # the node locks are computed later, in DeclareLocks, once the
    # instance's node list is known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5057 74409b12 Iustin Pop
5058 74409b12 Iustin Pop
  def DeclareLocks(self, level):
    """Lock the instance's nodes once the node locking level is reached."""
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()
5061 a8083063 Iustin Pop
5062 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    # expose only the backend parameters that are actually changing
    for be_key, hook_name in ((constants.BE_MEMORY, 'memory'),
                              (constants.BE_VCPUS, 'vcpus')):
      if be_key in self.be_new:
        args[hook_name] = self.be_new[be_key]
    # FIXME: readd disk/nic changes
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl
5078 a8083063 Iustin Pop
5079 a8083063 Iustin Pop
  def CheckPrereq(self):
5080 a8083063 Iustin Pop
    """Check prerequisites.
5081 a8083063 Iustin Pop

5082 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
5083 a8083063 Iustin Pop

5084 a8083063 Iustin Pop
    """
5085 24991749 Iustin Pop
    force = self.force = self.op.force
5086 a8083063 Iustin Pop
5087 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
5088 31a853d2 Iustin Pop
5089 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5090 1a5c7281 Guido Trotter
    assert self.instance is not None, \
5091 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5092 74409b12 Iustin Pop
    pnode = self.instance.primary_node
5093 74409b12 Iustin Pop
    nodelist = [pnode]
5094 74409b12 Iustin Pop
    nodelist.extend(instance.secondary_nodes)
5095 74409b12 Iustin Pop
5096 338e51e8 Iustin Pop
    # hvparams processing
5097 74409b12 Iustin Pop
    if self.op.hvparams:
5098 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
5099 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
5100 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
5101 74409b12 Iustin Pop
          try:
5102 74409b12 Iustin Pop
            del i_hvdict[key]
5103 74409b12 Iustin Pop
          except KeyError:
5104 74409b12 Iustin Pop
            pass
5105 8edcd611 Guido Trotter
        elif val == constants.VALUE_NONE:
5106 8edcd611 Guido Trotter
          i_hvdict[key] = None
5107 74409b12 Iustin Pop
        else:
5108 74409b12 Iustin Pop
          i_hvdict[key] = val
5109 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
5110 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
5111 74409b12 Iustin Pop
                                i_hvdict)
5112 74409b12 Iustin Pop
      # local check
5113 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
5114 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
5115 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
5116 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
5117 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
5118 338e51e8 Iustin Pop
    else:
5119 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
5120 338e51e8 Iustin Pop
5121 338e51e8 Iustin Pop
    # beparams processing
5122 338e51e8 Iustin Pop
    if self.op.beparams:
5123 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
5124 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
5125 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
5126 338e51e8 Iustin Pop
          try:
5127 338e51e8 Iustin Pop
            del i_bedict[key]
5128 338e51e8 Iustin Pop
          except KeyError:
5129 338e51e8 Iustin Pop
            pass
5130 338e51e8 Iustin Pop
        else:
5131 338e51e8 Iustin Pop
          i_bedict[key] = val
5132 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
5133 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
5134 338e51e8 Iustin Pop
                                i_bedict)
5135 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
5136 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
5137 338e51e8 Iustin Pop
    else:
5138 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
5139 74409b12 Iustin Pop
5140 cfefe007 Guido Trotter
    self.warn = []
5141 647a5d80 Iustin Pop
5142 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
5143 647a5d80 Iustin Pop
      mem_check_list = [pnode]
5144 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
5145 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
5146 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
5147 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
5148 72737a7f Iustin Pop
                                                  instance.hypervisor)
5149 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
5150 72737a7f Iustin Pop
                                         instance.hypervisor)
5151 781de953 Iustin Pop
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
5152 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
5153 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
5154 cfefe007 Guido Trotter
      else:
5155 781de953 Iustin Pop
        if not instance_info.failed and instance_info.data:
5156 781de953 Iustin Pop
          current_mem = instance_info.data['memory']
5157 cfefe007 Guido Trotter
        else:
5158 cfefe007 Guido Trotter
          # Assume instance not running
5159 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
5160 cfefe007 Guido Trotter
          # and we have no other way to check)
5161 cfefe007 Guido Trotter
          current_mem = 0
5162 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
5163 781de953 Iustin Pop
                    nodeinfo[pnode].data['memory_free'])
5164 cfefe007 Guido Trotter
        if miss_mem > 0:
5165 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
5166 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
5167 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
5168 cfefe007 Guido Trotter
5169 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
5170 781de953 Iustin Pop
        for node, nres in instance.secondary_nodes.iteritems():
5171 781de953 Iustin Pop
          if nres.failed or not isinstance(nres.data, dict):
5172 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
5173 781de953 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
5174 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
5175 647a5d80 Iustin Pop
                             " secondary node %s" % node)
5176 5bc84f33 Alexander Schreiber
5177 24991749 Iustin Pop
    # NIC processing
5178 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5179 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5180 24991749 Iustin Pop
        if not instance.nics:
5181 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
5182 24991749 Iustin Pop
        continue
5183 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
5184 24991749 Iustin Pop
        # an existing nic
5185 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
5186 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
5187 24991749 Iustin Pop
                                     " are 0 to %d" %
5188 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
5189 24991749 Iustin Pop
      nic_bridge = nic_dict.get('bridge', None)
5190 24991749 Iustin Pop
      if nic_bridge is not None:
5191 24991749 Iustin Pop
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
5192 24991749 Iustin Pop
          msg = ("Bridge '%s' doesn't exist on one of"
5193 24991749 Iustin Pop
                 " the instance nodes" % nic_bridge)
5194 24991749 Iustin Pop
          if self.force:
5195 24991749 Iustin Pop
            self.warn.append(msg)
5196 24991749 Iustin Pop
          else:
5197 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
5198 24991749 Iustin Pop
5199 24991749 Iustin Pop
    # DISK processing
5200 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
5201 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
5202 24991749 Iustin Pop
                                 " diskless instances")
5203 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5204 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5205 24991749 Iustin Pop
        if len(instance.disks) == 1:
5206 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
5207 24991749 Iustin Pop
                                     " an instance")
5208 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
5209 24991749 Iustin Pop
        ins_l = ins_l[pnode]
5210 24991749 Iustin Pop
        if not type(ins_l) is list:
5211 24991749 Iustin Pop
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
5212 24991749 Iustin Pop
        if instance.name in ins_l:
5213 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
5214 24991749 Iustin Pop
                                     " disks.")
5215 24991749 Iustin Pop
5216 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
5217 24991749 Iustin Pop
          len(instance.nics) >= constants.MAX_DISKS):
5218 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
5219 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
5220 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
5221 24991749 Iustin Pop
        # an existing disk
5222 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
5223 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
5224 24991749 Iustin Pop
                                     " are 0 to %d" %
5225 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
5226 24991749 Iustin Pop
5227 a8083063 Iustin Pop
    return
5228 a8083063 Iustin Pop
5229 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    @param feedback_fn: function used to report warnings to the caller
    @return: list of (parameter, new value) pairs describing the applied
        changes

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          # use a dedicated name for the rpc result; the previous code
          # rebound 'result' here, clobbering the changes list built above
          # and then calling .append() on the rpc result object
          rpc_result = self.rpc.call_blockdev_remove(node, disk)
          if rpc_result.failed or not rpc_result.data:
            self.proc.LogWarning("Could not remove disk/%d on node %s,"
                                 " continuing anyway", device_idx, node)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          # file-based disks share the directory of the first disk
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        new_disk.mode = disk_dict['mode']
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for secondary_node in instance.secondary_nodes:
          if not _CreateBlockDevOnSecondary(self, secondary_node, instance,
                                            new_disk, False, info):
            self.LogWarning("Failed to create volume %s (%s) on"
                            " secondary node %s!",
                            new_disk.iv_name, new_disk, secondary_node)
        #HARDCODE
        if not _CreateBlockDevOnPrimary(self, instance.primary_node,
                                        instance, new_disk, info):
          self.LogWarning("Failed to create volume %s on primary!",
                          new_disk.iv_name)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # add a new nic
        if 'mac' not in nic_dict:
          mac = constants.VALUE_GENERATE
        else:
          mac = nic_dict['mac']
        if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          mac = self.cfg.GenerateMAC()
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
                              bridge=nic_dict.get('bridge', None))
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,bridge=%s" %
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
      else:
        # change a given nic; only the keys actually present are touched
        for key in 'mac', 'ip', 'bridge':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))

    # hvparams changes (new values were validated in CheckPrereq)
    if self.op.hvparams:
      instance.hvparams = self.hv_new
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes (new values were validated in CheckPrereq)
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    self.cfg.Update(instance)

    return result
5338 a8083063 Iustin Pop
5339 a8083063 Iustin Pop
5340 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    # lock either the explicitly requested nodes or the whole node list,
    # in shared mode since we only read data
    if self.op.nodes:
      node_locks = _GetWantedNodes(self, self.op.nodes)
    else:
      node_locks = locking.ALL_SET
    self.needed_locks = {locking.LEVEL_NODE: node_locks}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the nodes to query are exactly the ones we managed to lock
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node in rpcresult:
      node_data = rpcresult[node]
      if node_data.failed:
        # mark unreachable nodes with False instead of a list
        result[node] = False
      else:
        result[node] = node_data.data
    return result
5380 a8083063 Iustin Pop
5381 a8083063 Iustin Pop
5382 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know where
    # the previous export might be, and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    # export-specific variables, plus the standard per-instance environment
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    # the instance is locked by ExpandNames, so it must exist in the config
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)

    # instance disk type verification: file-based disks cannot be snapshotted
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    Sequence: optionally shut the instance down, snapshot each disk,
    restart the instance, export the snapshots to the target node,
    remove the snapshots, finalize the export and finally drop any
    older export of the same instance from the other nodes.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    # per-disk snapshot devices; False marks a disk we failed to snapshot
    snap_disks = []

    try:
      for disk in instance.disks:
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if new_dev_name.failed or not new_dev_name.data:
          self.LogWarning("Could not snapshot block device %s on node %s",
                          disk.logical_id[1], src_node)
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name.data),
                                 physical_id=(vgname, new_dev_name.data),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, but only if we
      # shut it down ourselves and it is configured as running
      if self.op.shutdown and instance.status == "up":
        result = self.rpc.call_instance_start(src_node, instance, None)
        if result.failed or not result.data:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        # copy the snapshot to the target node, then drop it locally;
        # failures are only warnings, the export continues best-effort
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        if result.failed or not result.data:
          self.LogWarning("Could not export block device %s from node %s to"
                          " node %s", dev.logical_id[1], src_node,
                          dst_node.name)
        result = self.rpc.call_blockdev_remove(src_node, dev)
        if result.failed or not result.data:
          self.LogWarning("Could not remove snapshot block device %s from node"
                          " %s", dev.logical_id[1], src_node)

    # write the export metadata on the target node
    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          # NOTE(review): unlike the other rpc calls above, this one tests the
          # rpc result object directly instead of .failed/.data -- presumably
          # always true; verify against the rpc layer's return convention
          if not self.rpc.call_export_remove(node, instance.name):
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)
5523 5c947f38 Iustin Pop
5524 5c947f38 Iustin Pop
5525 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      node_result = exportlist[node]
      if node_result.failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if instance_name not in node_result.data:
        continue
      found = True
      remove_result = self.rpc.call_export_remove(node, instance_name)
      if remove_result.failed or not remove_result.data:
        logging.error("Could not remove export for instance %s"
                      " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
5574 9ac99fda Guido Trotter
5575 9ac99fda Guido Trotter
5576 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    # node and instance targets must be expanded to their full name and
    # locked; the cluster target needs no lock
    self.needed_locks = {}
    kind = self.op.kind
    if kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # resolve the tag target object from the (kind, name) pair
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
5613 5c947f38 Iustin Pop
5614 5c947f38 Iustin Pop
5615 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq
    tags = self.target.GetTags()
    return list(tags)
5627 5c947f38 Iustin Pop
5628 5c947f38 Iustin Pop
5629 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
5630 73415719 Iustin Pop
  """Searches the tags for a given pattern.
5631 73415719 Iustin Pop

5632 73415719 Iustin Pop
  """
5633 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
5634 8646adce Guido Trotter
  REQ_BGL = False
5635 8646adce Guido Trotter
5636 8646adce Guido Trotter
  def ExpandNames(self):
5637 8646adce Guido Trotter
    self.needed_locks = {}
5638 73415719 Iustin Pop
5639 73415719 Iustin Pop
  def CheckPrereq(self):
5640 73415719 Iustin Pop
    """Check prerequisites.
5641 73415719 Iustin Pop

5642 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
5643 73415719 Iustin Pop

5644 73415719 Iustin Pop
    """
5645 73415719 Iustin Pop
    try:
5646 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
5647 73415719 Iustin Pop
    except re.error, err:
5648 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
5649 73415719 Iustin Pop
                                 (self.op.pattern, err))
5650 73415719 Iustin Pop
5651 73415719 Iustin Pop
  def Exec(self, feedback_fn):
5652 73415719 Iustin Pop
    """Returns the tag list.
5653 73415719 Iustin Pop

5654 73415719 Iustin Pop
    """
5655 73415719 Iustin Pop
    cfg = self.cfg
5656 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
5657 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
5658 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
5659 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
5660 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
5661 73415719 Iustin Pop
    results = []
5662 73415719 Iustin Pop
    for path, target in tgts:
5663 73415719 Iustin Pop
      for tag in target.GetTags():
5664 73415719 Iustin Pop
        if self.re.search(tag):
5665 73415719 Iustin Pop
          results.append((path, tag))
5666 73415719 Iustin Pop
    return results
5667 73415719 Iustin Pop
5668 73415719 Iustin Pop
5669 f27302fa Iustin Pop
class LUAddTags(TagsLU):
5670 5c947f38 Iustin Pop
  """Sets a tag on a given object.
5671 5c947f38 Iustin Pop

5672 5c947f38 Iustin Pop
  """
5673 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5674 8646adce Guido Trotter
  REQ_BGL = False
5675 5c947f38 Iustin Pop
5676 5c947f38 Iustin Pop
  def CheckPrereq(self):
5677 5c947f38 Iustin Pop
    """Check prerequisites.
5678 5c947f38 Iustin Pop

5679 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
5680 5c947f38 Iustin Pop

5681 5c947f38 Iustin Pop
    """
5682 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5683 f27302fa Iustin Pop
    for tag in self.op.tags:
5684 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5685 5c947f38 Iustin Pop
5686 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5687 5c947f38 Iustin Pop
    """Sets the tag.
5688 5c947f38 Iustin Pop

5689 5c947f38 Iustin Pop
    """
5690 5c947f38 Iustin Pop
    try:
5691 f27302fa Iustin Pop
      for tag in self.op.tags:
5692 f27302fa Iustin Pop
        self.target.AddTag(tag)
5693 5c947f38 Iustin Pop
    except errors.TagError, err:
5694 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
5695 5c947f38 Iustin Pop
    try:
5696 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5697 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5698 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5699 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5700 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5701 5c947f38 Iustin Pop
5702 5c947f38 Iustin Pop
5703 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    # resolve self.target first (cluster/node/instance object)
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    # every tag to be removed must currently exist on the target
    missing = del_tags - self.target.GetTags()
    if missing:
      diff_names = sorted("'%s'" % tag for tag in missing)
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    # persist the modified object; a concurrent config change turns
    # into a retryable error for the caller
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
5740 06009e27 Iustin Pop
5741 0eed6e61 Guido Trotter
5742 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if not self.op.on_nodes:
      return
    # _GetWantedNodes can be used here, but is not always appropriate to use
    # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
    # more information.
    self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
    self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    # the master-side delay runs first, then the per-node delays
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if not self.op.on_nodes:
      return
    result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
    if not result:
      raise errors.OpExecError("Complete failure from rpc call")
    for node, node_result in result.items():
      node_result.Raise()
      if not node_result.data:
        raise errors.OpExecError("Failure during rpc call to node %s,"
                                 " result: %s" % (node, node_result.data))
5787 d61df03e Iustin Pop
5788 d61df03e Iustin Pop
5789 d1c2dd75 Iustin Pop
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has several sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # keyword arguments required for an "allocate" request
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  # keyword arguments required for a "relocate" request
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, lu, mode, name, **kwargs):
    # NOTE: construction is heavyweight -- _BuildInputData() below
    # issues RPC calls to all nodes to gather the cluster state
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    # the mode selects which kwargs are mandatory; any extra or missing
    # key is a programming error, not a user error
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    Fills self.in_data with a dict containing the cluster, node and
    instance information queried from the configuration and via RPC.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": 1,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    # NOTE: the name 'iinfo' is reused as a loop variable further down,
    # shadowing this list; i_list is what is actually iterated later
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    # pick the hypervisor whose resource view we query the nodes for:
    # the requested one when allocating, the instance's own when relocating
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname in node_list:
      ninfo = cfg.GetNodeInfo(nname)
      node_data[nname].Raise()
      if not isinstance(node_data[nname].data, dict):
        raise errors.OpExecError("Can't get data for node %s" % nname)
      # coerce the numeric attributes to int, failing loudly on bad data
      remote_info = node_data[nname].data
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
                   'vg_size', 'vg_free', 'cpu_total']:
        if attr not in remote_info:
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
                                   (nname, attr))
        try:
          remote_info[attr] = int(remote_info[attr])
        except ValueError, err:
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
                                   " %s" % (nname, attr, str(err)))
      # compute memory used by primary instances
      i_p_mem = i_p_up_mem = 0
      for iinfo, beinfo in i_list:
        if iinfo.primary_node == nname:
          i_p_mem += beinfo[constants.BE_MEMORY]
          if iinfo.name not in node_iinfo[nname]:
            # instance configured on this node but not reported as running
            i_used_mem = 0
          else:
            i_used_mem = int(node_iinfo[nname][iinfo.name]['memory'])
          # subtract the not-yet-used part of the instance's configured
          # memory from the node's reported free memory
          i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
          remote_info['memory_free'] -= max(0, i_mem_diff)

          if iinfo.status == "up":
            i_p_up_mem += beinfo[constants.BE_MEMORY]

      # build the per-node result dict
      pnr = {
        "tags": list(ninfo.GetTags()),
        "total_memory": remote_info['memory_total'],
        "reserved_memory": remote_info['memory_dom0'],
        "free_memory": remote_info['memory_free'],
        "i_pri_memory": i_p_mem,
        "i_pri_up_memory": i_p_up_mem,
        "total_disk": remote_info['vg_size'],
        "free_disk": remote_info['vg_free'],
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "total_cpus": remote_info['cpu_total'],
        "offline": ninfo.offline,
        }
      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "should_run": iinfo.status == "up",
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _AllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data
    if len(self.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    # network-mirrored templates need a secondary node as well
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _IAllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # only network-mirrored instances (with exactly one secondary) can
    # be relocated
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    Computes the cluster-wide data, adds the mode-specific request and
    serializes the whole structure into self.in_text.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    @param name: the name of the allocator script to run
    @param validate: whether to parse/validate the script output
    @param call_fn: override for the RPC call (used by tests)

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner
    data = self.in_text  # NOTE: unused local, kept for compatibility

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    # the runner returns a (rcode, stdout, stderr, fail) 4-tuple
    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # copy the mandatory result keys onto the instance attributes
    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
6078 538475ca Iustin Pop
6079 538475ca Iustin Pop
6080 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests in either direction: it can build
  the allocator input (IALLOCATOR_DIR_IN) or actually run a named
  allocator script and return its output (IALLOCATOR_DIR_OUT).

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the director and mode test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # an allocation request must carry a complete virtual instance
      # specification
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      # the test instance must not clash with an existing one
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      # use getattr with a default so that an absent 'hypervisor'
      # attribute is treated like an explicit None (defaulted to the
      # cluster hypervisor) instead of raising a raw AttributeError;
      # all other optional attributes above are guarded via hasattr
      if getattr(self.op, "hypervisor", None) is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      # relocation only needs an existing instance name
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      # the instance is relocated away from its current secondaries
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    # running an external allocator (direction 'out') needs its name
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    @return: the allocator input text (direction 'in') or the raw
        allocator output text (direction 'out')

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result