Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ b775c337

History | View | Annotate | Download (252 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import time
29 a8083063 Iustin Pop
import tempfile
30 a8083063 Iustin Pop
import re
31 a8083063 Iustin Pop
import platform
32 ffa1c0dc Iustin Pop
import logging
33 74409b12 Iustin Pop
import copy
34 4b7735f9 Iustin Pop
import random
35 a8083063 Iustin Pop
36 a8083063 Iustin Pop
from ganeti import ssh
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 6048c986 Guido Trotter
from ganeti import locking
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 8d14b30d Iustin Pop
from ganeti import serializer
45 112f18a5 Iustin Pop
from ganeti import ssconf
46 d61df03e Iustin Pop
47 d61df03e Iustin Pop
48 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  # Hooks path and type; subclasses that run hooks must redefine these
  # (if HPATH stays None, BuildHooksEnv is never called).
  HPATH = None
  HTYPE = None
  # Names of opcode attributes that must be present (non-None) at init time
  _OP_REQP = []
  # By default LUs run under the Big Ganeti Lock, held exclusively
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # Locks are exclusive by default (value 0); ExpandNames may set a level
    # to a true value to request shared acquisition at that level
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # Lazily-created SshRunner (see __GetSSH); the double underscore mangles
    # this to _LogicalUnit__ssh, so it doesn't clash with the 'ssh' property
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    # Fail early if any declared-required opcode parameter is missing
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    The runner is created on first use only, since not every LU needs SSH.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  # NOTE: within the class namespace this attribute shadows the module-level
  # 'ssh' import; __GetSSH still sees the module via the global scope
  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separate is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, ecc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    # ExpandInstanceName returns None when the (possibly short) name does not
    # match any configured instance
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    If should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # Consume the recalculation request so that stale state cannot leak into
    # a later DeclareLocks call at this level
    del self.recalculate_locks[locking.LEVEL_NODE]
326 c4a2fee1 Guido Trotter
327 a8083063 Iustin Pop
328 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Convenience LU base class for units that run no hooks.

  Deriving from this class (instead of L{LogicalUnit} directly) keeps
  HPATH/HTYPE unset, which tells the processor to skip hook execution
  entirely, and saves each such LU from repeating these assignments.

  """
  # No hooks path/type: the hooks runner is never invoked for these LUs
  HPATH = HTYPE = None
337 a8083063 Iustin Pop
338 a8083063 Iustin Pop
339 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: non-empty list of (possibly short) node names
  @rtype: list
  @return: the list of expanded node names, sorted
  @raise errors.OpPrereqError: if the nodes parameter is not a list,
      or if any of the passed node names is not known
  @raise errors.ProgrammerError: if called with an empty node list

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    # ExpandNodeName returns None when the (possibly short) name does not
    # match any configured node
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)
366 3312b702 Iustin Pop
367 3312b702 Iustin Pop
368 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
369 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded instance names.
370 3312b702 Iustin Pop

371 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
372 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
373 e4376078 Iustin Pop
  @type instances: list
374 e4376078 Iustin Pop
  @param instances: list of instance names or None for all instances
375 e4376078 Iustin Pop
  @rtype: list
376 e4376078 Iustin Pop
  @return: the list of instances, sorted
377 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if the instances parameter is wrong type
378 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if any of the passed instances is not found
379 3312b702 Iustin Pop

380 3312b702 Iustin Pop
  """
381 3312b702 Iustin Pop
  if not isinstance(instances, list):
382 3312b702 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'instances'")
383 3312b702 Iustin Pop
384 3312b702 Iustin Pop
  if instances:
385 3312b702 Iustin Pop
    wanted = []
386 3312b702 Iustin Pop
387 3312b702 Iustin Pop
    for name in instances:
388 a7ba5e53 Iustin Pop
      instance = lu.cfg.ExpandInstanceName(name)
389 3312b702 Iustin Pop
      if instance is None:
390 3312b702 Iustin Pop
        raise errors.OpPrereqError("No such instance name '%s'" % name)
391 3312b702 Iustin Pop
      wanted.append(instance)
392 3312b702 Iustin Pop
393 3312b702 Iustin Pop
  else:
394 a7f5dc98 Iustin Pop
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
395 a7f5dc98 Iustin Pop
  return wanted
396 dcb93971 Michael Hanselmann
397 dcb93971 Michael Hanselmann
398 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Combines the static and dynamic field sets and verifies that every
  entry in C{selected} matches one of them.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @raise errors.OpPrereqError: if any selected field is unknown

  """
  all_fields = utils.FieldSet()
  all_fields.Extend(static)
  all_fields.Extend(dynamic)

  unknown = all_fields.NonMatching(selected)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown))
415 dcb93971 Michael Hanselmann
416 dcb93971 Michael Hanselmann
417 a5961235 Iustin Pop
def _CheckBooleanOpField(op, name):
418 a5961235 Iustin Pop
  """Validates boolean opcode parameters.
419 a5961235 Iustin Pop

420 a5961235 Iustin Pop
  This will ensure that an opcode parameter is either a boolean value,
421 a5961235 Iustin Pop
  or None (but that it always exists).
422 a5961235 Iustin Pop

423 a5961235 Iustin Pop
  """
424 a5961235 Iustin Pop
  val = getattr(op, name, None)
425 a5961235 Iustin Pop
  if not (val is None or isinstance(val, bool)):
426 a5961235 Iustin Pop
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
427 a5961235 Iustin Pop
                               (name, str(val)))
428 a5961235 Iustin Pop
  setattr(op, name, val)
429 a5961235 Iustin Pop
430 a5961235 Iustin Pop
431 a5961235 Iustin Pop
def _CheckNodeOnline(lu, node):
432 a5961235 Iustin Pop
  """Ensure that a given node is online.
433 a5961235 Iustin Pop

434 a5961235 Iustin Pop
  @param lu: the LU on behalf of which we make the check
435 a5961235 Iustin Pop
  @param node: the node to check
436 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is offline
437 a5961235 Iustin Pop

438 a5961235 Iustin Pop
  """
439 a5961235 Iustin Pop
  if lu.cfg.GetNodeInfo(node).offline:
440 a5961235 Iustin Pop
    raise errors.OpPrereqError("Can't use offline node %s" % node)
441 a5961235 Iustin Pop
442 a5961235 Iustin Pop
443 733a2b6a Iustin Pop
def _CheckNodeNotDrained(lu, node):
444 733a2b6a Iustin Pop
  """Ensure that a given node is not drained.
445 733a2b6a Iustin Pop

446 733a2b6a Iustin Pop
  @param lu: the LU on behalf of which we make the check
447 733a2b6a Iustin Pop
  @param node: the node to check
448 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is drained
449 733a2b6a Iustin Pop

450 733a2b6a Iustin Pop
  """
451 733a2b6a Iustin Pop
  if lu.cfg.GetNodeInfo(node).drained:
452 733a2b6a Iustin Pop
    raise errors.OpPrereqError("Can't use drained node %s" % node)
453 733a2b6a Iustin Pop
454 733a2b6a Iustin Pop
455 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
456 67fc3042 Iustin Pop
                          memory, vcpus, nics, disk_template, disks,
457 67fc3042 Iustin Pop
                          bep, hvp, hypervisor):
458 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
459 e4376078 Iustin Pop

460 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
461 e4376078 Iustin Pop

462 e4376078 Iustin Pop
  @type name: string
463 e4376078 Iustin Pop
  @param name: the name of the instance
464 e4376078 Iustin Pop
  @type primary_node: string
465 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
466 e4376078 Iustin Pop
  @type secondary_nodes: list
467 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
468 e4376078 Iustin Pop
  @type os_type: string
469 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
470 0d68c45d Iustin Pop
  @type status: boolean
471 0d68c45d Iustin Pop
  @param status: the should_run status of the instance
472 e4376078 Iustin Pop
  @type memory: string
473 e4376078 Iustin Pop
  @param memory: the memory size of the instance
474 e4376078 Iustin Pop
  @type vcpus: string
475 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
476 e4376078 Iustin Pop
  @type nics: list
477 e4376078 Iustin Pop
  @param nics: list of tuples (ip, bridge, mac) representing
478 e4376078 Iustin Pop
      the NICs the instance  has
479 2c2690c9 Iustin Pop
  @type disk_template: string
480 2c2690c9 Iustin Pop
  @param disk_template: the distk template of the instance
481 2c2690c9 Iustin Pop
  @type disks: list
482 2c2690c9 Iustin Pop
  @param disks: the list of (size, mode) pairs
483 67fc3042 Iustin Pop
  @type bep: dict
484 67fc3042 Iustin Pop
  @param bep: the backend parameters for the instance
485 67fc3042 Iustin Pop
  @type hvp: dict
486 67fc3042 Iustin Pop
  @param hvp: the hypervisor parameters for the instance
487 67fc3042 Iustin Pop
  @type hypervisor: string
488 67fc3042 Iustin Pop
  @param hypervisor: the hypervisor for the instance
489 e4376078 Iustin Pop
  @rtype: dict
490 e4376078 Iustin Pop
  @return: the hook environment for this instance
491 ecb215b5 Michael Hanselmann

492 396e1b78 Michael Hanselmann
  """
493 0d68c45d Iustin Pop
  if status:
494 0d68c45d Iustin Pop
    str_status = "up"
495 0d68c45d Iustin Pop
  else:
496 0d68c45d Iustin Pop
    str_status = "down"
497 396e1b78 Michael Hanselmann
  env = {
498 0e137c28 Iustin Pop
    "OP_TARGET": name,
499 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
500 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
501 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
502 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
503 0d68c45d Iustin Pop
    "INSTANCE_STATUS": str_status,
504 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
505 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
506 2c2690c9 Iustin Pop
    "INSTANCE_DISK_TEMPLATE": disk_template,
507 67fc3042 Iustin Pop
    "INSTANCE_HYPERVISOR": hypervisor,
508 396e1b78 Michael Hanselmann
  }
509 396e1b78 Michael Hanselmann
510 396e1b78 Michael Hanselmann
  if nics:
511 396e1b78 Michael Hanselmann
    nic_count = len(nics)
512 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
513 396e1b78 Michael Hanselmann
      if ip is None:
514 396e1b78 Michael Hanselmann
        ip = ""
515 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
516 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
517 2c2690c9 Iustin Pop
      env["INSTANCE_NIC%d_MAC" % idx] = mac
518 396e1b78 Michael Hanselmann
  else:
519 396e1b78 Michael Hanselmann
    nic_count = 0
520 396e1b78 Michael Hanselmann
521 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
522 396e1b78 Michael Hanselmann
523 2c2690c9 Iustin Pop
  if disks:
524 2c2690c9 Iustin Pop
    disk_count = len(disks)
525 2c2690c9 Iustin Pop
    for idx, (size, mode) in enumerate(disks):
526 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_SIZE" % idx] = size
527 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_MODE" % idx] = mode
528 2c2690c9 Iustin Pop
  else:
529 2c2690c9 Iustin Pop
    disk_count = 0
530 2c2690c9 Iustin Pop
531 2c2690c9 Iustin Pop
  env["INSTANCE_DISK_COUNT"] = disk_count
532 2c2690c9 Iustin Pop
533 67fc3042 Iustin Pop
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
534 67fc3042 Iustin Pop
    for key, value in source.items():
535 67fc3042 Iustin Pop
      env["INSTANCE_%s_%s" % (kind, key)] = value
536 67fc3042 Iustin Pop
537 396e1b78 Michael Hanselmann
  return env
538 396e1b78 Michael Hanselmann
539 396e1b78 Michael Hanselmann
540 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Build the hook environment dictionary from an instance object.

  This extracts the relevant data from the instance object and from
  the cluster's backend/hypervisor parameter defaults, then delegates
  to L{_BuildInstanceHookEnv} for the actual environment building.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  # filled-in (instance overrides + cluster defaults) parameter dicts
  be_full = cluster.FillBE(instance)
  hv_full = cluster.FillHV(instance)
  nic_list = [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics]
  disk_list = [(disk.size, disk.mode) for disk in instance.disks]
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': be_full[constants.BE_MEMORY],
    'vcpus': be_full[constants.BE_VCPUS],
    'nics': nic_list,
    'disk_template': instance.disk_template,
    'disks': disk_list,
    'bep': be_full,
    'hvp': hv_full,
    'hypervisor': instance.hypervisor,
  }
  if override:
    # caller-supplied values take precedence over the computed ones
    args.update(override)
  return _BuildInstanceHookEnv(**args)
576 396e1b78 Michael Hanselmann
577 396e1b78 Michael Hanselmann
578 ec0292f1 Iustin Pop
def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  Promotes nodes to master candidate role if the pool is below the
  desired size (via the configuration's pool maintenance) and logs a
  note if there are more candidates than desired.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute

  """
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      # re-add the promoted nodes so the rest of the code sees the change
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    # use lazy argument passing, consistent with the LogInfo call above
    # (the original interpolated eagerly with '%')
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)",
               mc_now, mc_max)
592 ec0292f1 Iustin Pop
593 ec0292f1 Iustin Pop
594 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  Queries the instance's primary node about the bridges used by its
  NICs and raises an error if any of them is missing.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose bridges we check

  """
  # gather the bridge of each NIC and ask the primary node about them
  brlist = [nic.bridge for nic in instance.nics]
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
  result.Raise()
  if result.data:
    return
  raise errors.OpPrereqError("One or more target bridges %s does not"
                             " exist on destination node '%s'" %
                             (brlist, instance.primary_node))
606 bf6929a2 Alexander Schreiber
607 bf6929a2 Alexander Schreiber
608 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    The cluster may only be destroyed when it consists of exactly the
    master node and holds no instances.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if nodelist != [master]:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))

    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Stops the master role on the master node and creates backups of
    the ssh key files, then returns the master node's name.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise()
    if not result.data:
      raise errors.OpExecError("Could not disable the master role")

    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)

    return master
646 a8083063 Iustin Pop
647 a8083063 Iustin Pop
648 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
649 a8083063 Iustin Pop
  """Verifies the cluster status.
650 a8083063 Iustin Pop

651 a8083063 Iustin Pop
  """
652 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
653 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
654 e54c4c5e Guido Trotter
  _OP_REQP = ["skip_checks"]
655 d4b9d97f Guido Trotter
  REQ_BGL = False
656 d4b9d97f Guido Trotter
657 d4b9d97f Guido Trotter
  def ExpandNames(self):
    """Set up the locking: all nodes and all instances, in shared mode.

    """
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # every locking level is acquired shared (value 1)
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
663 a8083063 Iustin Pop
664 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map, vg_name):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
    @rtype: boolean
    @return: True if problems were found, False otherwise

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    # the version result must be a (protocol, release) two-element sequence
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    # a protocol mismatch means we cannot interpret the rest of the results
    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version; a mismatch is only a warning, it does not set bad
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G
    if vg_name is not None:
      vglist = node_result.get(constants.NV_VGLIST, None)
      if not vglist:
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                        (node,))
        bad = True
      else:
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
          bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        # files listed in master_files are only required on master candidates
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates (and the file is outdated)" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    # NOTE(review): the loops below rebind the 'node' local that held the
    # node name above; keep that in mind when editing this function
    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    # per-hypervisor verification results; failures are reported but do
    # not set bad
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    if vg_name is not None:
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
      if not isinstance(used_minors, (tuple, list)):
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
                    str(used_minors))
      else:
        # minors which should be in use but are not active on the node
        for minor, (iname, must_exist) in drbd_map.items():
          if minor not in used_minors and must_exist:
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
                        " not active" % (minor, iname))
            bad = True
        # minors active on the node but not known to the configuration
        for minor in used_minors:
          if minor not in drbd_map:
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
                        minor)
            bad = True

    return bad
811 a8083063 Iustin Pop
812 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify a single instance.

    Checks that the logical volumes the instance needs are present on
    its nodes, that the instance runs on its primary node when it is
    marked up, and that no other node reports it as running.

    """
    bad = False
    primary = instanceconfig.primary_node

    # map of node name -> list of volumes the instance should have there
    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for nname, volumes in node_vol_should.items():
      if nname in n_offline:
        # ignore missing volumes on offline nodes
        continue
      for volume in volumes:
        if nname not in node_vol_is or volume not in node_vol_is[nname]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                      (volume, nname))
          bad = True

    if instanceconfig.admin_up:
      running_on_primary = (primary in node_instance and
                            instance in node_instance[primary])
      if not running_on_primary and primary not in n_offline:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                    (instance, primary))
        bad = True

    # the instance must not show up on any node other than its primary
    for nname in node_instance:
      if nname != primary and instance in node_instance[nname]:
        feedback_fn("  - ERROR: instance %s should not run on node %s" %
                    (instance, nname))
        bad = True

    return bad
853 a8083063 Iustin Pop
854 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
855 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
856 a8083063 Iustin Pop

857 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
858 a8083063 Iustin Pop
    reported as unknown.
859 a8083063 Iustin Pop

860 a8083063 Iustin Pop
    """
861 a8083063 Iustin Pop
    bad = False
862 a8083063 Iustin Pop
863 a8083063 Iustin Pop
    for node in node_vol_is:
864 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
865 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
866 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
867 a8083063 Iustin Pop
                      (volume, node))
868 a8083063 Iustin Pop
          bad = True
869 a8083063 Iustin Pop
    return bad
870 a8083063 Iustin Pop
871 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
872 a8083063 Iustin Pop
    """Verify the list of running instances.
873 a8083063 Iustin Pop

874 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
875 a8083063 Iustin Pop

876 a8083063 Iustin Pop
    """
877 a8083063 Iustin Pop
    bad = False
878 a8083063 Iustin Pop
    for node in node_instance:
879 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
880 a8083063 Iustin Pop
        if runninginstance not in instancelist:
881 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
882 a8083063 Iustin Pop
                          (runninginstance, node))
883 a8083063 Iustin Pop
          bad = True
884 a8083063 Iustin Pop
    return bad
885 a8083063 Iustin Pop
886 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    @param node_info: dict of node name to a dict with 'mfree' and
        'sinst-by-pnode' entries, as built by L{Exec}
    @param instance_cfg: dict of instance name to instance object
    @param feedback_fn: function used to accumulate results
    @return: True if some node cannot accomodate its failovers

    """
    bad = False

    # the cluster info is invariant over the loops below, so fetch it
    # only once instead of once per instance in the innermost loop
    cluster_info = self.cfg.GetClusterInfo()

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = cluster_info.FillBE(instance_cfg[instance])
          # only auto-balanced instances count towards the N+1 check
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
915 2b3b6ddd Guido Trotter
916 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    # any requested check outside the optional set is invalid
    unknown = self.skip_set - constants.VERIFY_OPTIONAL_CHECKS
    if unknown:
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
926 a8083063 Iustin Pop
927 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run in the post phase; their failure is
    logged in the verify output and makes the verification fail.

    """
    # one NODE_TAGS_<name> entry per node, plus the cluster-wide tags
    env = dict(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
               for node in self.cfg.GetAllNodesInfo().values())
    env["CLUSTER_TAGS"] = " ".join(self.cfg.GetClusterInfo().GetTags())

    # the hooks run on all nodes of the cluster
    return env, [], self.cfg.GetNodeList()
942 d8fff41c Guido Trotter
943 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
944 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
945 a8083063 Iustin Pop

946 a8083063 Iustin Pop
    """
947 a8083063 Iustin Pop
    bad = False
948 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
949 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
950 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
951 a8083063 Iustin Pop
952 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
953 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
954 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
955 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
956 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
957 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
958 6d2e83d5 Iustin Pop
                        for iname in instancelist)
959 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
960 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
961 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
962 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
963 a8083063 Iustin Pop
    node_volume = {}
964 a8083063 Iustin Pop
    node_instance = {}
965 9c9c7d30 Guido Trotter
    node_info = {}
966 26b6af5e Guido Trotter
    instance_cfg = {}
967 a8083063 Iustin Pop
968 a8083063 Iustin Pop
    # FIXME: verify OS list
969 a8083063 Iustin Pop
    # do local checksums
970 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
971 112f18a5 Iustin Pop
972 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
973 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
974 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
975 112f18a5 Iustin Pop
    file_names.extend(master_files)
976 112f18a5 Iustin Pop
977 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
978 a8083063 Iustin Pop
979 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
980 a8083063 Iustin Pop
    node_verify_param = {
981 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
982 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
983 82e37788 Iustin Pop
                              if not node.offline],
984 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
985 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
986 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
987 82e37788 Iustin Pop
                                 if not node.offline],
988 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
989 25361b9a Iustin Pop
      constants.NV_VERSION: None,
990 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
991 a8083063 Iustin Pop
      }
992 cc9e1230 Guido Trotter
    if vg_name is not None:
993 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
994 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
995 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
996 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
997 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
998 a8083063 Iustin Pop
999 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1000 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1001 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1002 6d2e83d5 Iustin Pop
1003 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1004 112f18a5 Iustin Pop
      node = node_i.name
1005 25361b9a Iustin Pop
      nresult = all_nvinfo[node].data
1006 25361b9a Iustin Pop
1007 0a66c968 Iustin Pop
      if node_i.offline:
1008 0a66c968 Iustin Pop
        feedback_fn("* Skipping offline node %s" % (node,))
1009 0a66c968 Iustin Pop
        n_offline.append(node)
1010 0a66c968 Iustin Pop
        continue
1011 0a66c968 Iustin Pop
1012 112f18a5 Iustin Pop
      if node == master_node:
1013 25361b9a Iustin Pop
        ntype = "master"
1014 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1015 25361b9a Iustin Pop
        ntype = "master candidate"
1016 22f0f71d Iustin Pop
      elif node_i.drained:
1017 22f0f71d Iustin Pop
        ntype = "drained"
1018 22f0f71d Iustin Pop
        n_drained.append(node)
1019 112f18a5 Iustin Pop
      else:
1020 25361b9a Iustin Pop
        ntype = "regular"
1021 112f18a5 Iustin Pop
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1022 25361b9a Iustin Pop
1023 25361b9a Iustin Pop
      if all_nvinfo[node].failed or not isinstance(nresult, dict):
1024 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
1025 25361b9a Iustin Pop
        bad = True
1026 25361b9a Iustin Pop
        continue
1027 25361b9a Iustin Pop
1028 6d2e83d5 Iustin Pop
      node_drbd = {}
1029 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1030 c614e5fb Iustin Pop
        if instance not in instanceinfo:
1031 c614e5fb Iustin Pop
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
1032 c614e5fb Iustin Pop
                      instance)
1033 c614e5fb Iustin Pop
          # ghost instance should not be running, but otherwise we
1034 c614e5fb Iustin Pop
          # don't give double warnings (both ghost instance and
1035 c614e5fb Iustin Pop
          # unallocated minor in use)
1036 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1037 c614e5fb Iustin Pop
        else:
1038 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1039 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
1040 112f18a5 Iustin Pop
      result = self._VerifyNode(node_i, file_names, local_checksums,
1041 6d2e83d5 Iustin Pop
                                nresult, feedback_fn, master_files,
1042 cc9e1230 Guido Trotter
                                node_drbd, vg_name)
1043 a8083063 Iustin Pop
      bad = bad or result
1044 a8083063 Iustin Pop
1045 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1046 cc9e1230 Guido Trotter
      if vg_name is None:
1047 cc9e1230 Guido Trotter
        node_volume[node] = {}
1048 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1049 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
1050 26f15862 Iustin Pop
                    (node, utils.SafeEncode(lvdata)))
1051 b63ed789 Iustin Pop
        bad = True
1052 b63ed789 Iustin Pop
        node_volume[node] = {}
1053 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1054 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1055 a8083063 Iustin Pop
        bad = True
1056 a8083063 Iustin Pop
        continue
1057 b63ed789 Iustin Pop
      else:
1058 25361b9a Iustin Pop
        node_volume[node] = lvdata
1059 a8083063 Iustin Pop
1060 a8083063 Iustin Pop
      # node_instance
1061 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1062 25361b9a Iustin Pop
      if not isinstance(idata, list):
1063 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1064 25361b9a Iustin Pop
                    (node,))
1065 a8083063 Iustin Pop
        bad = True
1066 a8083063 Iustin Pop
        continue
1067 a8083063 Iustin Pop
1068 25361b9a Iustin Pop
      node_instance[node] = idata
1069 a8083063 Iustin Pop
1070 9c9c7d30 Guido Trotter
      # node_info
1071 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1072 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
1073 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1074 9c9c7d30 Guido Trotter
        bad = True
1075 9c9c7d30 Guido Trotter
        continue
1076 9c9c7d30 Guido Trotter
1077 9c9c7d30 Guido Trotter
      try:
1078 9c9c7d30 Guido Trotter
        node_info[node] = {
1079 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1080 93e4c50b Guido Trotter
          "pinst": [],
1081 93e4c50b Guido Trotter
          "sinst": [],
1082 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1083 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1084 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1085 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1086 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1087 36e7da50 Guido Trotter
          # secondary.
1088 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1089 9c9c7d30 Guido Trotter
        }
1090 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1091 cc9e1230 Guido Trotter
        if vg_name is not None:
1092 9a198532 Iustin Pop
          if (constants.NV_VGLIST not in nresult or
1093 9a198532 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST]):
1094 9a198532 Iustin Pop
            feedback_fn("  - ERROR: node %s didn't return data for the"
1095 9a198532 Iustin Pop
                        " volume group '%s' - it is either missing or broken" %
1096 9a198532 Iustin Pop
                        (node, vg_name))
1097 9a198532 Iustin Pop
            bad = True
1098 9a198532 Iustin Pop
            continue
1099 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1100 9a198532 Iustin Pop
      except (ValueError, KeyError):
1101 9a198532 Iustin Pop
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
1102 9a198532 Iustin Pop
                    " from node %s" % (node,))
1103 9c9c7d30 Guido Trotter
        bad = True
1104 9c9c7d30 Guido Trotter
        continue
1105 9c9c7d30 Guido Trotter
1106 a8083063 Iustin Pop
    node_vol_should = {}
1107 a8083063 Iustin Pop
1108 a8083063 Iustin Pop
    for instance in instancelist:
1109 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1110 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1111 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1112 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1113 c5705f58 Guido Trotter
      bad = bad or result
1114 832261fd Iustin Pop
      inst_nodes_offline = []
1115 a8083063 Iustin Pop
1116 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1117 a8083063 Iustin Pop
1118 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1119 26b6af5e Guido Trotter
1120 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1121 93e4c50b Guido Trotter
      if pnode in node_info:
1122 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1123 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1124 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1125 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1126 93e4c50b Guido Trotter
        bad = True
1127 93e4c50b Guido Trotter
1128 832261fd Iustin Pop
      if pnode in n_offline:
1129 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1130 832261fd Iustin Pop
1131 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1132 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1133 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1134 93e4c50b Guido Trotter
      # supported either.
1135 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1136 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1137 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1138 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1139 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1140 93e4c50b Guido Trotter
                    % instance)
1141 93e4c50b Guido Trotter
1142 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1143 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1144 3924700f Iustin Pop
1145 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1146 93e4c50b Guido Trotter
        if snode in node_info:
1147 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1148 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1149 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1150 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1151 0a66c968 Iustin Pop
        elif snode not in n_offline:
1152 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1153 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1154 832261fd Iustin Pop
          bad = True
1155 832261fd Iustin Pop
        if snode in n_offline:
1156 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1157 832261fd Iustin Pop
1158 832261fd Iustin Pop
      if inst_nodes_offline:
1159 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1160 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1161 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1162 832261fd Iustin Pop
        bad = True
1163 93e4c50b Guido Trotter
1164 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1165 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1166 a8083063 Iustin Pop
                                       feedback_fn)
1167 a8083063 Iustin Pop
    bad = bad or result
1168 a8083063 Iustin Pop
1169 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1170 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1171 a8083063 Iustin Pop
                                         feedback_fn)
1172 a8083063 Iustin Pop
    bad = bad or result
1173 a8083063 Iustin Pop
1174 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1175 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1176 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1177 e54c4c5e Guido Trotter
      bad = bad or result
1178 2b3b6ddd Guido Trotter
1179 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1180 2b3b6ddd Guido Trotter
    if i_non_redundant:
1181 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1182 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1183 2b3b6ddd Guido Trotter
1184 3924700f Iustin Pop
    if i_non_a_balanced:
1185 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1186 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1187 3924700f Iustin Pop
1188 0a66c968 Iustin Pop
    if n_offline:
1189 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1190 0a66c968 Iustin Pop
1191 22f0f71d Iustin Pop
    if n_drained:
1192 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1193 22f0f71d Iustin Pop
1194 34290825 Michael Hanselmann
    return not bad
1195 a8083063 Iustin Pop
1196 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and only their results are
    # of interest; any other phase falls through (implicit None before).
    if phase != constants.HOOKS_PHASE_POST:
      return None

    # used to re-indent the hooks' output to our nesting level
    indent_re = re.compile('^', re.M)
    feedback_fn("* Hooks Results")
    if not hooks_results:
      feedback_fn("  - ERROR: general communication failure")
      return 1

    for node_name, res in hooks_results.items():
      show_node_header = True
      if res.failed or res.data is False or not isinstance(res.data, list):
        if res.offline:
          # offline nodes neither warn nor affect the return value
          continue
        feedback_fn("    Communication failure in hooks execution")
        lu_result = 1
        continue
      for script, hkr, output in res.data:
        if hkr != constants.HKR_FAIL:
          continue
        # the node header is shown only once, and only if the node
        # actually has failing hooks
        if show_node_header:
          feedback_fn("  Node %s:" % node_name)
          show_node_header = False
        feedback_fn("    ERROR: Script %s failed, output:" % script)
        feedback_fn("%s" % indent_re.sub('      ', output))
        lu_result = 1

    return lu_result
1244 d8fff41c Guido Trotter
1245 a8083063 Iustin Pop
1246 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # read-only LU: shared locks over all nodes and instances suffice
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @return: a 4-element tuple of (list of nodes which returned
        invalid LV data, dict of node name to LVM error string, list
        of instance names having at least one offline LV, dict of
        instance name to list of (node, volume) pairs for LVs missing
        on that node)

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      # only running, network-mirrored instances are checked
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]
      if lvs.failed:
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        # a string result means an LVM-level error on that node
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1330 2c95a8d4 Iustin Pop
1331 2c95a8d4 Iustin Pop
1332 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  _OP_REQP = ["instances"]
  REQ_BGL = False

  def ExpandNames(self):

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      # explicit instance list: expand the names now, lock only those
      # instances; the node locks are computed later in DeclareLocks
      self.wanted_names = []
      for name in self.op.instances:
        expanded = self.cfg.ExpandInstanceName(name)
        if expanded is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(expanded)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      # no instance names given: operate on the whole cluster
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    if self.wanted_names is not None:
      # only the primary nodes matter for size checks
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # lock-all mode: the wanted list is whatever we actually locked
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name)
                             for name in self.wanted_names]

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type != constants.LD_DRBD8:
      return False

    assert disk.children, "Empty children for DRBD8?"
    fchild = disk.children[0]
    mismatch = fchild.size < disk.size
    if mismatch:
      self.LogInfo("Child disk has size %d, parent %d, fixing",
                   fchild.size, disk.size)
      fchild.size = disk.size

    # and we recurse on this child only, not on the metadev
    return self._EnsureChildSizes(fchild) or mismatch

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      # group (instance, index, disk) triples by primary node
      dlist = per_node_disks.setdefault(instance.primary_node, [])
      for idx, disk in enumerate(instance.disks):
        dlist.append((instance, idx, disk))

    changed = []
    for node, disks in per_node_disks.items():
      # query the sizes on copies of the disks, with IDs set for this node
      probe_list = [entry[2].Copy() for entry in disks]
      for dsk in probe_list:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsizes(node, probe_list)
      if result.failed:
        self.LogWarning("Failure in blockdev_getsizes call to node"
                        " %s, ignoring", node)
        continue
      if len(result.data) != len(disks):
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(disks, result.data):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
        # the node reports bytes; the configuration stores mebibytes
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance)
          changed.append((instance.name, idx, disk.size))
    return changed
1452 60975797 Iustin Pop
1453 60975797 Iustin Pop
1454 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    mn = self.cfg.GetMasterNode()
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    # hooks run only on the master node
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    # a new IP must not already be live on the network
    if (new_ip != old_ip and
        utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT)):
      raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                 " reachable on the network. Aborting." %
                                 new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      if master in node_list:
        node_list.remove(master)
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)

    finally:
      # always try to bring the master role back up
      result = self.rpc.call_node_start_master(master, False, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")
1532 07bd8a51 Iustin Pop
1533 07bd8a51 Iustin Pop
1534 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  if disk.dev_type == constants.LD_LV:
    return True
  # recurse into the children (if any); the order of the checks does
  # not matter since this is a pure predicate
  return any(_RecursiveCheckIfLVMBased(chdisk)
             for chdisk in disk.children or [])
1548 8084f9f6 Manuel Franceschini
1549 8084f9f6 Manuel Franceschini
1550 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  Handles the (independently optional) modification of: the volume
  group name, the hypervisor parameters, the list of enabled
  hypervisors, the backend parameter defaults and the master
  candidate pool size.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    Normalizes candidate_pool_size: it must be either absent/None or
    convertible to an integer >= 1; otherwise OpPrereqError is raised.

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except (ValueError, TypeError), err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    # hooks run only on the master node, for both pre and post phases
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # an empty (but not None) vg_name means "disable lvm storage";
    # refuse that while any instance still has lvm-based disks
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      # merge the per-hypervisor dicts: new entries are taken verbatim,
      # existing ones are updated key by key
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      # an empty string is normalized to None (lvm storage disabled)
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self)

    self.cfg.Update(self.cluster)
1685 8084f9f6 Manuel Franceschini
1686 8084f9f6 Manuel Franceschini
1687 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU: it just re-saves the current cluster
  configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # every node is touched, but a shared lock is enough
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    There is nothing to check for this LU.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    # re-writing the current cluster info triggers the distribution
    self.cfg.Update(self.cfg.GetClusterInfo())
1712 afee0879 Iustin Pop
1713 afee0879 Iustin Pop
1714 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Repeatedly queries the mirror status of all the instance's disks on
  its primary node, logging progress, until no disk reports a sync
  percentage anymore (or after a single poll, if oneshot is set).

  @param lu: the calling LogicalUnit (provides cfg, rpc and logging)
  @param instance: the instance whose disks are polled; only its
      primary node is queried
  @type oneshot: boolean
  @param oneshot: if True, poll the status only once, don't loop
  @type unlock: boolean
  @param unlock: not read anywhere in this function (kept for
      interface compatibility)
  @rtype: boolean
  @return: True if no disk is degraded at the end of the wait
  @raise errors.RemoteError: if the primary node fails to return
      mirror data ten times in a row

  """
  if not instance.disks:
    # no disks: nothing to wait for
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      # RPC-level failure: retry up to ten times before giving up
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # count a disk as degraded only when it's degraded while no sync
      # is in progress (perc_done is None)
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a reported percentage means this disk is still syncing
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    # sleep proportionally to the longest remaining estimate, but at
    # most one minute between polls
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1781 a8083063 Iustin Pop
1782 a8083063 Iustin Pop
1783 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1784 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1785 a8083063 Iustin Pop

1786 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1787 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1788 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1789 0834c866 Iustin Pop

1790 a8083063 Iustin Pop
  """
1791 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1792 0834c866 Iustin Pop
  if ldisk:
1793 0834c866 Iustin Pop
    idx = 6
1794 0834c866 Iustin Pop
  else:
1795 0834c866 Iustin Pop
    idx = 5
1796 a8083063 Iustin Pop
1797 a8083063 Iustin Pop
  result = True
1798 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1799 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1800 23829f6f Iustin Pop
    msg = rstats.RemoteFailMsg()
1801 23829f6f Iustin Pop
    if msg:
1802 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
1803 23829f6f Iustin Pop
      result = False
1804 23829f6f Iustin Pop
    elif not rstats.payload:
1805 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
1806 a8083063 Iustin Pop
      result = False
1807 a8083063 Iustin Pop
    else:
1808 23829f6f Iustin Pop
      result = result and (not rstats.payload[idx])
1809 a8083063 Iustin Pop
  if dev.children:
1810 a8083063 Iustin Pop
    for child in dev.children:
1811 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1812 a8083063 Iustin Pop
1813 a8083063 Iustin Pop
  return result
1814 a8083063 Iustin Pop
1815 a8083063 Iustin Pop
1816 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  # no static fields are defined for this query
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    There are no prerequisites for this LU.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].failed]
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in good_nodes:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    # NOTE(review): a plain False return presumably signals a complete
    # RPC failure — verify against the rpc layer
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    # build one row per OS, in the order of the requested output fields
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # an OS is valid only if every node has at least one entry
          # and the first entry is itself valid (truthy)
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1907 a8083063 Iustin Pop
1908 a8083063 Iustin Pop
1909 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # the node being removed is excluded from the hook node lists
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # use call syntax for raising, consistent with the rest of this
      # module (the legacy "raise Class, args" form is deprecated)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)
1976 eb1742d5 Guido Trotter
1977 a8083063 Iustin Pop
1978 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields that require live data from the nodes
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  # fields answerable from the configuration alone
  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    "drained",
    "role",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # do_node_query is true iff at least one non-static field was
    # requested; only then do we need live data (and possibly locks)
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted


  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      # with locking, the lock set defines the node list
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      # without locking, verify the requested names still exist
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          # node down or RPC error: no live data for it
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    # maps of node name -> set of instance names it hosts
    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    # only walk the instance list if an instance-related field is wanted
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif field == "drained":
          val = node.drained
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        elif field == "role":
          # single-letter role: Master, Candidate, Drained, Offline, Regular
          if node.name == master_node:
            val = "M"
          elif node.master_candidate:
            val = "C"
          elif node.drained:
            val = "D"
          elif node.offline:
            val = "O"
          else:
            val = "R"
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
2146 a8083063 Iustin Pop
2147 a8083063 Iustin Pop
2148 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    # validate the requested output fields before computing the locks
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      # no node list given: query all nodes
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]

    # map each instance to its per-node LV dict, for the "instance" field
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in instances])

    output = []
    for node in nodenames:
      nresult = volumes.get(node, None)
      if nresult is None or nresult.failed or not nresult.data:
        # skip nodes whose RPC failed or which reported no volumes
        continue

      node_vols = sorted(nresult.data, key=lambda vol: vol['dev'])

      for vol in node_vols:
        vol_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance (if any) that owns this logical volume
            val = '-'
            for inst in instances:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          vol_output.append(str(val))

        output.append(vol_output)

    return output
2227 dcb93971 Michael Hanselmann
2228 dcb93971 Michael Hanselmann
2229 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # single-homed setup: secondary defaults to the primary IP
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # on readd, the node must keep its previous IP configuration
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # decide whether the new node can become a master candidate
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []
    mc_now, mc_max = self.cfg.GetMasterCandidateStats(exceptions)
    # the new node will increase mc_max with one, so:
    mc_max = min(mc_max + 1, cp_size)
    self.master_candidate = mc_now < mc_max

    if self.op.readd:
      self.new_node = self.cfg.GetNodeInfo(node)
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
    else:
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot transfer ssh keys to the"
                               " new node: %s" % msg)

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        for failed in result[verifier].data['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    if constants.HTS_COPY_VNC_PASSWORD.intersection(enabled_hypervisors):
      to_copy.append(constants.VNC_PASSWORD_FILE)

    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      # FIX: check result[node].data, not the always-truthy result object
      # itself, consistently with the other upload/verify checks above
      if result[node].failed or not result[node].data:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      self.context.AddNode(new_node)
2473 a8083063 Iustin Pop
2474 a8083063 Iustin Pop
2475 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    # resolve the node name first, so later phases see the canonical name
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    requested = [self.op.offline, self.op.master_candidate, self.op.drained]
    if requested.count(None) == 3:
      # nothing was requested at all
      raise errors.OpPrereqError("Please pass at least one modification")
    if requested.count(True) > 1:
      # offline/drained/master_candidate are mutually-exclusive states
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    demoting = (self.op.master_candidate == False or
                self.op.offline == True or
                self.op.drained == True)
    if demoting and node.master_candidate:
      # we will demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate, online and not drained")
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          # with --force, only warn about dropping below the pool size
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      # can't promote a node that is (and stays) offline or drained
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name)

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    changes = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      changes.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        # going offline implies demotion from mc and clearing drained
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          changes.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          changes.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      changes.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        # ask the node itself to clean up after the demotion
        demote_result = self.rpc.call_node_demote_from_mc(node.name)
        msg = demote_result.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      changes.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        # draining implies demotion from mc and clearing offline
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          changes.append(("master_candidate", "auto-demotion due to drain"))
          demote_result = self.rpc.call_node_demote_from_mc(node.name)
          msg = demote_result.RemoteFailMsg()
          if msg:
            self.LogWarning("Node failed to demote itself: %s" % msg)
        if node.offline:
          node.offline = False
          changes.append(("offline", "clear offline status due to drain"))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return changes
2604 b31c8676 Iustin Pop
2605 b31c8676 Iustin Pop
2606 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # read-only query, no locks needed
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    info = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      # only expose hvparams for the hypervisors actually enabled
      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
                        for hypervisor in cluster.enabled_hypervisors]),
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "default_bridge": cluster.default_bridge,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "file_storage_dir": cluster.file_storage_dir,
      }

    return info
2649 a8083063 Iustin Pop
2650 a8083063 Iustin Pop
2651 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    # this query needs no locks at all
    self.needed_locks = {}
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    result = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        result.append(self.cfg.GetClusterName())
      elif field == "master_node":
        result.append(self.cfg.GetMasterNode())
      elif field == "drain_flag":
        # the drain flag is represented by the existence of a marker file
        result.append(os.path.exists(constants.JOB_QUEUE_DRAIN_FILE))
      else:
        raise errors.ParameterError(field)
    return result
2689 a8083063 Iustin Pop
2690 a8083063 Iustin Pop
2691 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and its primary
    node is online, and defaults the optional ignore_size parameter.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)
    # older clients may not send this attribute at all
    self.op.ignore_size = getattr(self.op, "ignore_size", False)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(
      self, self.instance, ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")
    return disks_info
2731 a8083063 Iustin Pop
2732 a8083063 Iustin Pop
2733 e3443b36 Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        # work on a copy so the size stored in the configuration
        # object is not clobbered
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      # is_primary=False: secondary-mode assembly on every node first
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        # secondary failures only count when not explicitly ignored
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        # a primary-node failure is always fatal for the result
        disks_ok = False
    # NOTE(review): this append is outside the inner node loop and relies
    # on 'result' still holding the primary-node assemble result; the
    # payload is appended even when that call failed — confirm intended
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        result.payload))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2811 a8083063 Iustin Pop
2812 a8083063 Iustin Pop
2813 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Assemble the disks of an instance, aborting on failure.

  On assembly failure the already-started disks are shut down again
  and an OpExecError is raised; a hint about '--force' is logged when
  the failure might be limited to secondary nodes.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                       ignore_secondaries=force)
  if disks_ok:
    return
  # roll back whatever was brought up before failing
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.proc.LogWarning("", hint="If the message above refers to a"
                       " secondary node,"
                       " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2826 fe7b0351 Michael Hanselmann
2827 fe7b0351 Michael Hanselmann
2828 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks, refusing if the instance is running.

    """
    _SafeShutdownInstanceDisks(self, self.instance)
2860 a8083063 Iustin Pop
2861 a8083063 Iustin Pop
2862 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance, refusing if it is running.

  The primary node's instance list is queried first; only when the
  instance is confirmed not to be running there are its disks shut
  down via _ShutdownInstanceDisks.

  """
  pnode = instance.primary_node
  node_insts = lu.rpc.call_instance_list([pnode],
                                         [instance.hypervisor])[pnode]
  if node_insts.failed or not isinstance(node_insts.data, list):
    raise errors.OpExecError("Can't contact node '%s'" % pnode)

  if instance.name in node_insts.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2881 a8083063 Iustin Pop
2882 a8083063 Iustin Pop
2883 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, failures on the primary node do not
  affect the return value (they are still logged); failures on any
  other node, or on the primary when ignore_primary is false, make
  the function return False.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      shutdown_res = lu.rpc.call_blockdev_shutdown(node, top_disk)
      err = shutdown_res.RemoteFailMsg()
      if err:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, err)
        if node != instance.primary_node or not ignore_primary:
          result = False
  return result
2904 a8083063 Iustin Pop
2905 a8083063 Iustin Pop
2906 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Verify that a node has at least C{requested} MiB of free memory.

  The free-memory figure is obtained via a node_info RPC; if the node
  cannot be queried, reports a non-integer value, or has less free
  memory than requested, an OpPrereqError is raised.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_data = lu.rpc.call_node_info([node], lu.cfg.GetVGName(),
                                    hypervisor_name)[node]
  node_data.Raise()
  free_mem = node_data.data.get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                             " was '%s'" % (node, free_mem))
  if free_mem < requested:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                             " needed %s MiB, available %s MiB" %
                             (node, reason, requested, free_mem))
2938 d4f16fd9 Iustin Pop
2939 d4f16fd9 Iustin Pop
2940 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, validates the
    optional one-off beparams/hvparams overrides, and — if the
    instance is not already running — that the primary node has
    enough free memory for it.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra beparams: optional per-start override of backend parameters
    self.beparams = getattr(self.op, "beparams", {})
    if self.beparams:
      if not isinstance(self.beparams, dict):
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
                                   " dict" % (type(self.beparams), ))
      # fill the beparams dict
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
      self.op.beparams = self.beparams

    # extra hvparams: optional per-start override of hypervisor parameters
    self.hvparams = getattr(self.op, "hvparams", {})
    if self.hvparams:
      if not isinstance(self.hvparams, dict):
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
                                   " dict" % (type(self.hvparams), ))

      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
      # the overrides are validated against the fully-filled parameter
      # set (cluster defaults + instance values + this start's extras)
      filled_hvp = cluster.FillDict(cluster.hvparams[instance.hypervisor],
                                    instance.hvparams)
      filled_hvp.update(self.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
      self.op.hvparams = self.hvparams

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    # the memory check is skipped when the instance is already running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if not remote_info.data:
      _CheckNodeFreeMemory(self, instance.primary_node,
                           "starting instance %s" % instance.name,
                           bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    # record the desired (up) state in the configuration before starting
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance,
                                          self.hvparams, self.beparams)
    msg = result.RemoteFailMsg()
    if msg:
      # roll back the disk activation before reporting the failure
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)
3037 a8083063 Iustin Pop
3038 a8083063 Iustin Pop
3039 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    # validate the reboot type before acquiring any locks
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft and hard reboots are delegated to the node as a single RPC;
    a full reboot is implemented as shutdown + disk deactivation +
    disk activation + start.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not reboot instance: %s" % msg)
    else:
      # full reboot: stop the instance, cycle its disks, start it again
      result = self.rpc.call_instance_shutdown(node_current, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance for"
                                 " full reboot: %s" % msg)
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        # roll back the disk activation before reporting the failure
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)
3122 bf6929a2 Alexander Schreiber
3123 bf6929a2 Alexander Schreiber
3124 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    node_list = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    # record the desired (down) state in the configuration first
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    err = result.RemoteFailMsg()
    if err:
      # a failed shutdown is only warned about; disks are still deactivated
      self.proc.LogWarning("Could not shutdown instance: %s" % err)

    _ShutdownInstanceDisks(self, instance)
3170 a8083063 Iustin Pop
3171 a8083063 Iustin Pop
3172 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  Re-runs the OS create scripts on the instance's disks, optionally
  switching the instance to a different OS type first.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked administratively down, the instance must not
    # actually be running on its primary node
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # note: the reinstall opcode carries no 'pnode' attribute, so we
        # must report the instance's primary node here (using self.op.pnode
        # would raise AttributeError instead of the intended error)
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      # the disks were activated only for the install step; deactivate
      # them again even if the OS creation failed
      _ShutdownInstanceDisks(self, inst)
3258 fe7b0351 Michael Hanselmann
3259 fe7b0351 Michael Hanselmann
3260 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  # no ExpandNames/REQ_BGL here: this LU runs under the big ganeti lock
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name is resolvable and not already taken (by an
    instance name or, unless ignore_ip is set, by a reachable IP).

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked administratively down, the instance must not
    # actually be running on its primary node
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    Renames the instance in the configuration (and its lock), moves the
    file-storage directory for file-based instances, then runs the OS
    rename script with the disks temporarily activated.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        # a rename-script failure is not fatal: the config rename already
        # happened, so only warn
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
3369 decd5f45 Iustin Pop
3370 decd5f45 Iustin Pop
3371 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node only.

    """
    hook_env = _BuildInstanceHookEnvByObject(self, self.instance)
    master_only = [self.cfg.GetMasterNode()]
    return hook_env, master_only, master_only

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    Shuts the instance down, removes its disks, then drops it from the
    configuration; shutdown/disk failures abort unless ignore_failures
    was requested.

    """
    inst = self.instance
    pnode = inst.primary_node
    logging.info("Shutting down instance %s on node %s",
                 inst.name, pnode)

    shutdown_res = self.rpc.call_instance_shutdown(pnode, inst)
    fail_msg = shutdown_res.RemoteFailMsg()
    if fail_msg:
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (inst.name, pnode, fail_msg))
      feedback_fn("Warning: can't shutdown instance: %s" % fail_msg)

    logging.info("Removing block devices for instance %s", inst.name)

    if not _RemoveDisks(self, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", inst.name)

    self.cfg.RemoveInstance(inst.name)
    # drop the now-stale instance lock together with the config entry
    self.remove_locks[locking.LEVEL_INSTANCE] = inst.name
3439 a8083063 Iustin Pop
3440 a8083063 Iustin Pop
3441 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  Supports static fields (read from the configuration) and dynamic
  fields (oper_state, oper_ram, status) which require live data from
  the nodes via RPC.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields answerable from the configuration alone; regex entries match
  # parameterized fields such as disk.size/0 or nic.mac/1
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  # fields that require querying the nodes
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")


  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # queries only need shared locks
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed (and only honoured) when live data is asked for
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list (one entry per instance) of lists of field values,
    in the order of self.op.output_fields.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      # gather live data from the primary nodes; live_data maps
      # instance name -> the per-instance dict returned by the node
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
            # else no instance is alive
    else:
      # static-only query: pretend every instance reported nothing
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        # st_match carries regex groups for parameterized fields
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          # None means "unknown" (node unreachable)
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin/operational status string
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "vcpus":
          val = i_be[constants.BE_VCPUS]
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          if instance.nics:
            val = instance.nics[0].ip
          else:
            val = None
        elif field == "bridge":
          if instance.nics:
            val = instance.nics[0].bridge
          else:
            val = None
        elif field == "mac":
          if instance.nics:
            val = instance.nics[0].mac
          else:
            val = None
        elif field == "sda_size" or field == "sdb_size":
          # legacy two-disk fields: map sda/sdb to disk index 0/1
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, ("Declared but unhandled variable parameter '%s'" %
                           field)
        else:
          assert False, "Declared but unhandled parameter '%s'" % field
        iout.append(val)
      output.append(iout)

    return output
3689 a8083063 Iustin Pop
3690 a8083063 Iustin Pop
3691 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  The instance is shut down on its primary node and (if marked as up)
  restarted on its secondary node; only network-mirrored disk
  templates are accepted (checked in CheckPrereq).

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode parameters that must be present
  _OP_REQP = ["instance_name", "ignore_consistency"]
  # use fine-grained locking instead of the big ganeti lock
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance itself; the node locks are left empty here and
    # filled in later, once the instance's node list is known
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # replace the (empty) node lock list with the instance's nodes
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    # hooks run on the master node plus all of the instance's secondaries
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that failing
    over to its secondary node is possible: mirrored disk template,
    target node online and not drained, enough free memory (for a
    running instance) and all needed bridges present on the target.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # effective backend parameters (cluster defaults + instance overrides)
    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      # a mirrored template without a secondary node is an internal
      # invariant violation, not a user error
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)

    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # a degraded disk only blocks the failover of a running instance,
        # and even then ignore_consistency can override the check
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      if self.op.ignore_consistency:
        # best-effort mode: warn and continue; the admin asserts that
        # the source node is really down
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # flip the primary node and persist the change cluster-wide
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        # don't leave half-activated disks behind on a start failure
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
3828 a8083063 Iustin Pop
3829 a8083063 Iustin Pop
3830 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  Only DRBD8-based instances are supported (checked in CheckPrereq).
  The opcode's "cleanup" mode recovers from a previously failed
  migration instead of performing a new one.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode parameters that must be present
  _OP_REQP = ["instance_name", "live", "cleanup"]

  # use fine-grained locking instead of the big ganeti lock
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance itself; the node locks are left empty here and
    # filled in later, once the instance's node list is known
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # replace the (empty) node lock list with the instance's nodes
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["MIGRATE_LIVE"] = self.op.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    # hooks run on the master node plus all of the instance's secondaries
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses the DRBD8
    disk template, has a secondary node with enough memory and all
    needed bridges, and (unless cleaning up) that the hypervisor
    reports the instance as migratable.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      # drbd8 without a secondary node means the configuration is broken
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    # effective backend parameters (cluster defaults + instance overrides)
    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    if result.failed or not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    if not self.op.cleanup:
      # these checks only matter for an actual migration, not for the
      # post-failure cleanup path
      _CheckNodeNotDrained(self, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    Polls all nodes every two seconds until every disk reports the
    sync as done, reporting the minimum progress percentage while
    waiting.  Raises OpExecError on any per-node rpc failure.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        msg = nres.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, msg))
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          # report the slowest node's progress
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    Closes the instance's block devices on the given node via rpc;
    raises OpExecError on failure.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    # make sure the disk IDs are resolved for the target node
    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, msg))

  def _GoStandalone(self):
    """Disconnect from the network.

    Puts the instance's disks on all involved nodes into standalone
    (disconnected) mode; raises OpExecError on any per-node failure.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot disconnect disks node %s,"
                                 " error %s" % (node, msg))

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    @type multimaster: boolean
    @param multimaster: if True, reconnect the disks in dual-master
        mode (needed during the live migration itself), otherwise in
        normal single-master mode

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node, msg))

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    # running on both or on neither node cannot be fixed automatically
    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore here errors, since if the device is standalone, it
      # won't be able to sync
      pass
    # bring the disks back into a clean single-master configuration
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    Best-effort: demotes the target node back to secondary and
    reconnects the disks in single-master mode; failures are only
    logged as a warning since the migration already failed.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    Tells the target node to discard the started migration (via the
    finalize_migration rpc with success=False); rpc failures are only
    logged so that the disk-status revert can still be attempted.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we stil have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # unlike failover, any degraded disk aborts the migration
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      # pre-migration failed: abort on the target and restore the
      # single-master disk configuration before giving up
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      # same recovery path as for a failed pre-migration
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    # the instance now runs on the target node; persist the new primary
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    # demote the old primary and return the disks to single-master mode
    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    Computes the node roles and their secondary IPs, then dispatches
    to either the cleanup or the actual migration path depending on
    the opcode's "cleanup" flag.

    """
    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    # DRBD replication runs over the nodes' secondary IPs
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
    if self.op.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()
4199 53c776b5 Iustin Pop
4200 53c776b5 Iustin Pop
4201 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Recursively create a block device tree on a given node.

  Children are always processed first; the device itself is only
  created when creation is forced, either by the caller or because the
  device reports that it must exist on secondary nodes.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; it is
      upgraded to True for any device whose CreateOnSecondary()
      attribute is set, and propagated to the children
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passes to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  for child in (device.children or []):
    _CreateBlockDev(lu, node, instance, child, force_create,
                    info, force_open)

  if force_create:
    _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
4241 de12473a Iustin Pop
4242 de12473a Iustin Pop
4243 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create one block device (without recursion) on a given node.

  The children of the device, if any, must already have been created
  in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passes to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  creation = lu.rpc.call_blockdev_create(node, device, device.size,
                                         instance.name, force_open, info)
  err = creation.RemoteFailMsg()
  if err:
    raise errors.OpExecError("Can't create block device %s on"
                             " node %s for instance %s: %s" %
                             (device, node, instance.name, err))
  # remember the device's node-level identifier, if not already known
  if device.physical_id is None:
    device.physical_id = creation.payload
4274 a8083063 Iustin Pop
4275 a8083063 Iustin Pop
4276 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
  """Generate unique LV names, one per given suffix.

  Each element of C{exts} is appended to a freshly-allocated unique ID
  from the cluster configuration.

  """
  return ["%s%s" % (lu.cfg.GenerateUniqueID(), ext) for ext in exts]
4287 923b1523 Iustin Pop
4288 923b1523 Iustin Pop
4289 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Build one drbd8 disk object together with its two backing LVs.

  Allocates a DRBD port and a shared secret from the configuration,
  then assembles a data LV (full size) and a 128 MB metadata LV as the
  children of the returned DRBD8 device.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  secret = lu.cfg.GenerateDRBDSecret()
  lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port,
                                  p_minor, s_minor, secret),
                      children=[lv_data, lv_meta],
                      iv_name=iv_name)
4308 a1f445d3 Iustin Pop
4309 7c0d6283 Michael Hanselmann
4310 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  @param lu: the logical unit on whose behalf we execute
  @param template_name: the disk template (one of the constants.DT_*
      values); any other value raises L{errors.ProgrammerError}
  @param instance_name: the name of the owning instance (used when
      reserving DRBD minors)
  @param primary_node: the instance's primary node
  @param secondary_nodes: the secondary nodes; must be empty except for
      the drbd8 template, which requires exactly one
  @param disk_info: list of dicts, each with at least "size" and "mode"
  @param file_storage_dir: base directory for file-backed disks (only
      used by the file template)
  @param file_driver: driver for file-backed disks (only used by the
      file template)
  @param base_index: offset added to each disk's index when building
      its iv_name (non-zero when adding disks to an existing instance)
  @return: list of L{objects.Disk} objects (empty for diskless)

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    # one unique LV name per disk
    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # two minors per disk: one on the primary, one on the remote node
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    # each disk needs a data LV and a metadata LV, hence two names per
    # generated unique prefix
    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
4374 a8083063 Iustin Pop
4375 a8083063 Iustin Pop
4376 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
4377 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
4378 3ecf6786 Iustin Pop

4379 3ecf6786 Iustin Pop
  """
4380 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
4381 a0c3fea1 Michael Hanselmann
4382 a0c3fea1 Michael Hanselmann
4383 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @raise errors.OpExecError: if disk or directory creation fails

  """
  meta_info = _GetInstanceInfoText(instance)
  primary = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    # file-based disks live in a per-instance directory which must be
    # created on the primary node first
    storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(primary, storage_dir)

    if result.failed or not result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % primary)

    if not result.data[0]:
      raise errors.OpExecError("Failed to create directory '%s'" %
                               storage_dir)

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in instance.all_nodes:
      # creation (and opening) is only forced on the primary node
      is_primary = (node == primary)
      _CreateBlockDev(lu, node, instance, device, is_primary,
                      meta_info, is_primary)
4419 a8083063 Iustin Pop
4420 a8083063 Iustin Pop
4421 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  success = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      err = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
      if err:
        # best-effort semantics: warn and keep going so the remaining
        # devices are still cleaned up
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", device.iv_name, node, err)
        success = False

  if instance.disk_template == constants.DT_FILE:
    # also remove the per-instance file storage directory
    storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                 storage_dir)
    if result.failed or not result.data:
      logging.error("Could not remove directory '%s'", storage_dir)
      success = False

  return success
4458 a8083063 Iustin Pop
4459 a8083063 Iustin Pop
4460 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  @param disk_template: one of the constants.DT_* values
  @param disks: list of dicts with a "size" key
  @return: required free space in the volume group, or None for
      templates that do not consume VG space

  """
  if disk_template == constants.DT_DISKLESS:
    return None
  elif disk_template == constants.DT_PLAIN:
    return sum(d["size"] for d in disks)
  elif disk_template == constants.DT_DRBD8:
    # 128 MB are added for drbd metadata for each disk
    return sum(d["size"] + 128 for d in disks)
  elif disk_template == constants.DT_FILE:
    # file-based disks do not use the volume group
    return None
  else:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)
4478 e2fe6369 Iustin Pop
4479 e2fe6369 Iustin Pop
4480 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  Shared between instance creation and instance modification: it asks
  every given node to validate the hypervisor parameters and raises on
  the first reported failure; offline nodes are skipped.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  results = lu.rpc.call_hypervisor_validate_params(nodenames, hvname,
                                                   hvparams)
  for node in nodenames:
    node_result = results[node]
    if node_result.offline:
      # offline nodes cannot validate anything
      continue
    err = node_result.RemoteFailMsg()
    if err:
      raise errors.OpPrereqError("Hypervisor parameter validation"
                                 " failed on node %s: %s" % (node, err))
4508 74409b12 Iustin Pop
4509 74409b12 Iustin Pop
4510 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
4511 a8083063 Iustin Pop
  """Create an instance.
4512 a8083063 Iustin Pop

4513 a8083063 Iustin Pop
  """
4514 a8083063 Iustin Pop
  HPATH = "instance-add"
4515 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4516 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
4517 08db7c5c Iustin Pop
              "mode", "start",
4518 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
4519 338e51e8 Iustin Pop
              "hvparams", "beparams"]
4520 7baf741d Guido Trotter
  REQ_BGL = False
4521 7baf741d Guido Trotter
4522 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Expand a (possibly short) node name and verify it exists.

    @param node: the node name to expand
    @return: the full node name
    @raise errors.OpPrereqError: if the name matches no known node

    """
    expanded = self.cfg.ExpandNodeName(node)
    if expanded is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return expanded
4530 7baf741d Guido Trotter
4531 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    Performs all the validation that can be done locally (without
    contacting other nodes): opcode constants, hypervisor and backend
    parameter syntax, instance name, NIC and disk specifications, and
    computes the node locks needed (a concrete node list, or ALL_SET
    when an iallocator will pick the nodes later).

    @raise errors.OpPrereqError: for any locally-detectable invalid
        opcode parameter

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # default to the cluster-wide hypervisor when none was requested
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    # merge opcode hvparams over the cluster defaults before validating
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp

    # fill and remember the beparams dict
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    # NOTE(review): utils.HostInfo presumably resolves the name and can
    # raise on failure -- confirm against its definition
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        # "auto" means: use the resolved instance IP
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
      # bridge verification
      bridge = nic.get("bridge", None)
      if bridge is None:
        bridge = self.cfg.GetDefBridge()
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # nodes are unknown until the allocator runs, so lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        # unknown source node: lock everything; only a relative path
        # (searched on all nodes) is acceptable in this case
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          # relative paths are anchored at the cluster export directory
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
4688 a8083063 Iustin Pop
4689 538475ca Iustin Pop
  def _RunAllocator(self):
    """Select target nodes for the new instance via an iallocator.

    Builds an allocation request from the opcode and the pre-computed
    backend parameters, runs the configured iallocator, and stores the
    chosen primary (and, when two nodes are required, secondary) node
    in the opcode.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    nic_dicts = [nic.ToDict() for nic in self.nics]
    allocator = IAllocator(self,
                           mode=constants.IALLOCATOR_MODE_ALLOC,
                           name=self.op.instance_name,
                           disk_template=self.op.disk_template,
                           tags=[],
                           os=self.op.os_type,
                           vcpus=self.be_full[constants.BE_VCPUS],
                           mem_size=self.be_full[constants.BE_MEMORY],
                           disks=self.disks,
                           nics=nic_dicts,
                           hypervisor=self.op.hypervisor,
                           )

    allocator.Run(self.op.iallocator)

    if not allocator.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, allocator.info))
    if len(allocator.nodes) != allocator.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(allocator.nodes),
                                  allocator.required_nodes))
    self.op.pnode = allocator.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(allocator.nodes))
    if allocator.required_nodes == 2:
      self.op.snode = allocator.nodes[1]
4724 538475ca Iustin Pop
4725 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Assemble the hook environment for instance creation.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"ADD_MODE": self.op.mode}
    # Import-specific variables are only meaningful when importing.
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    nic_info = [(nic.ip, nic.bridge, nic.mac) for nic in self.nics]
    disk_info = [(disk["size"], disk["mode"]) for disk in self.disks]
    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=nic_info,
      disk_template=self.op.disk_template,
      disks=disk_info,
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor=self.op.hypervisor,
    ))

    # Hooks run on the master plus every node hosting the new instance.
    node_list = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return env, node_list, node_list
4758 a8083063 Iustin Pop
4759 a8083063 Iustin Pop
4760 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Validates, in order: LVM availability on the cluster, the import
    source (when mode is INSTANCE_IMPORT), IP-conflict settings, node
    placement (running the iallocator if one was requested), primary
    and secondary node state, disk space, hypervisor parameters, OS
    availability, bridge existence and free memory on the primary.

    """
    # LVM-backed templates need a cluster volume group.
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      # No source node given: search every locked node for an export
      # whose (relative) path matches src_path.
      if src_node is None:
        exp_list = self.rpc.call_export_list(
          self.acquired_locks[locking.LEVEL_NODE])
        found = False
        for node in exp_list:
          if not exp_list[node].failed and src_path in exp_list[node].data:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                      src_path)

      _CheckNodeOnline(self, src_node)
      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise()
      if not result.data:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      # export_info behaves like a ConfigParser (has_section/get/getint).
      export_info = result.data
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      # The OS type is taken from the export, overriding any opcode value.
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      # Map each export disk index to its dump image path; False marks a
      # disk with no image (will be created but not restored).
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      # When re-importing under the same name, reuse the exported MAC
      # addresses for NICs still set to 'auto'.
      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          # NOTE(review): 'exp_nic_count >= idx' also accepts
          # idx == exp_nic_count, i.e. one NIC beyond what the export
          # declares; 'idx < exp_nic_count' looks intended -- confirm
          # before changing.
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    #### allocator run

    # The allocator fills in self.op.pnode/self.op.snode, consumed below.
    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    # req_size is None for templates with no LVM space requirement.
    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise()
        info = info.data
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise()
    if not isinstance(result.data, objects.OS) or not result.data:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
                                 " exist on destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)
4938 49ce1563 Iustin Pop
4939 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Order of operations: allocate a network port (for hypervisors that
    need one), generate and create the disks, register the instance in
    the cluster config, release node locks, wait for disk sync, run the
    OS create/import scripts and optionally start the instance.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # Some hypervisors (e.g. those with VNC consoles) need a cluster-wide
    # unique network port allocated for the instance.
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    # Instance starts with admin_up=False; it is flipped below only when
    # self.op.start is set and the OS scripts succeeded.
    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    try:
      _CreateDisks(self, iobj)
    except errors.OpExecError:
      # Roll back any partially-created disks, always release the DRBD
      # minors, then re-raise the original failure.
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, iobj)
      finally:
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Unlock all the nodes
    if self.op.mode == constants.INSTANCE_IMPORT:
      # Keep the source node locked: the import scripts below still read
      # the export images from it.
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # Degraded disks: undo everything done so far and abort the job.
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
        msg = result.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Could not add os for instance %s"
                                   " on node %s: %s" %
                                   (instance, pnode_name, msg))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        import_result.Raise()
        # Per-disk import failures are only warnings, not fatal errors.
        for idx, result in enumerate(import_result.data):
          if not result:
            self.LogWarning("Could not import the image %s for instance"
                            " %s, disk %d, on node %s" %
                            (src_images[idx], instance, idx, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      # Persist the new admin state before actually starting, so a crash
      # after this point still records the instance as supposed-to-run.
      iobj.admin_up = True
      self.cfg.Update(iobj)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not start instance: %s" % msg)
5075 a8083063 Iustin Pop
5076 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Compute the command needed to attach to an instance's console.

  This is somewhat special among LUs: rather than acting on the
  instance itself, Exec() returns the ssh command line that has to be
  run on the master node in order to reach the console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Verify the instance exists and its primary node is online.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Build and return the console access command line.

    """
    instance = self.instance
    primary = instance.primary_node

    # The instance must actually be running on its primary node.
    listing = self.rpc.call_instance_list([primary],
                                          [instance.hypervisor])[primary]
    listing.Raise()
    if instance.name not in listing.data:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logging.debug("Connecting to console of %s on %s", instance.name, primary)

    hv = hypervisor.GetHypervisor(instance.hypervisor)
    cluster = self.cfg.GetClusterInfo()
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    console_cmd = hv.GetShellCommandForConsole(instance,
                                               cluster.FillHV(instance),
                                               cluster.FillBE(instance))

    # build ssh cmdline
    return self.ssh.BuildCmd(primary, "root", console_cmd, batch=True,
                             tty=True)
5127 a8083063 Iustin Pop
5128 a8083063 Iustin Pop
5129 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
5130 a8083063 Iustin Pop
  """Replace the disks of an instance.
5131 a8083063 Iustin Pop

5132 a8083063 Iustin Pop
  """
5133 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
5134 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5135 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
5136 efd990e4 Guido Trotter
  REQ_BGL = False
5137 efd990e4 Guido Trotter
5138 7e9366f7 Iustin Pop
  def CheckArguments(self):
5139 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
5140 efd990e4 Guido Trotter
      self.op.remote_node = None
5141 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
5142 7e9366f7 Iustin Pop
      self.op.iallocator = None
5143 7e9366f7 Iustin Pop
5144 7e9366f7 Iustin Pop
    # check for valid parameter combination
5145 7e9366f7 Iustin Pop
    cnt = [self.op.remote_node, self.op.iallocator].count(None)
5146 7e9366f7 Iustin Pop
    if self.op.mode == constants.REPLACE_DISK_CHG:
5147 7e9366f7 Iustin Pop
      if cnt == 2:
5148 7e9366f7 Iustin Pop
        raise errors.OpPrereqError("When changing the secondary either an"
5149 7e9366f7 Iustin Pop
                                   " iallocator script must be used or the"
5150 7e9366f7 Iustin Pop
                                   " new node given")
5151 7e9366f7 Iustin Pop
      elif cnt == 0:
5152 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Give either the iallocator or the new"
5153 efd990e4 Guido Trotter
                                   " secondary, not both")
5154 7e9366f7 Iustin Pop
    else: # not replacing the secondary
5155 7e9366f7 Iustin Pop
      if cnt != 2:
5156 7e9366f7 Iustin Pop
        raise errors.OpPrereqError("The iallocator and new node options can"
5157 7e9366f7 Iustin Pop
                                   " be used only when changing the"
5158 7e9366f7 Iustin Pop
                                   " secondary node")
5159 7e9366f7 Iustin Pop
5160 7e9366f7 Iustin Pop
  def ExpandNames(self):
5161 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
5162 7e9366f7 Iustin Pop
5163 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
5164 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5165 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
5166 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
5167 efd990e4 Guido Trotter
      if remote_node is None:
5168 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
5169 efd990e4 Guido Trotter
                                   self.op.remote_node)
5170 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
5171 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
5172 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
5173 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
5174 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
5175 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
5176 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5177 efd990e4 Guido Trotter
    else:
5178 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
5179 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5180 efd990e4 Guido Trotter
5181 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
5182 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
5183 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
5184 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
5185 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
5186 efd990e4 Guido Trotter
      self._LockInstancesNodes()
5187 a8083063 Iustin Pop
5188 b6e82a65 Iustin Pop
  def _RunAllocator(self):
5189 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
5190 b6e82a65 Iustin Pop

5191 b6e82a65 Iustin Pop
    """
5192 72737a7f Iustin Pop
    ial = IAllocator(self,
5193 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
5194 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
5195 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
5196 b6e82a65 Iustin Pop
5197 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
5198 b6e82a65 Iustin Pop
5199 b6e82a65 Iustin Pop
    if not ial.success:
5200 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
5201 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
5202 b6e82a65 Iustin Pop
                                                           ial.info))
5203 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
5204 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5205 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
5206 b6e82a65 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
5207 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
5208 86d9d3bb Iustin Pop
    self.LogInfo("Selected new secondary for the instance: %s",
5209 86d9d3bb Iustin Pop
                 self.op.remote_node)
5210 b6e82a65 Iustin Pop
5211 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5212 a8083063 Iustin Pop
    """Build hooks env.
5213 a8083063 Iustin Pop

5214 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
5215 a8083063 Iustin Pop

5216 a8083063 Iustin Pop
    """
5217 a8083063 Iustin Pop
    env = {
5218 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
5219 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
5220 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
5221 a8083063 Iustin Pop
      }
5222 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5223 0834c866 Iustin Pop
    nl = [
5224 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
5225 0834c866 Iustin Pop
      self.instance.primary_node,
5226 0834c866 Iustin Pop
      ]
5227 0834c866 Iustin Pop
    if self.op.remote_node is not None:
5228 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
5229 a8083063 Iustin Pop
    return env, nl, nl
5230 a8083063 Iustin Pop
5231 a8083063 Iustin Pop
  def CheckPrereq(self):
5232 a8083063 Iustin Pop
    """Check prerequisites.
5233 a8083063 Iustin Pop

5234 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
5235 a8083063 Iustin Pop

5236 a8083063 Iustin Pop
    """
5237 efd990e4 Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5238 efd990e4 Guido Trotter
    assert instance is not None, \
5239 efd990e4 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5240 a8083063 Iustin Pop
    self.instance = instance
5241 a8083063 Iustin Pop
5242 7e9366f7 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
5243 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
5244 7e9366f7 Iustin Pop
                                 " instances")
5245 a8083063 Iustin Pop
5246 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
5247 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
5248 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
5249 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
5250 a8083063 Iustin Pop
5251 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
5252 a9e0c397 Iustin Pop
5253 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
5254 de8c7666 Guido Trotter
      self._RunAllocator()
5255 b6e82a65 Iustin Pop
5256 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
5257 a9e0c397 Iustin Pop
    if remote_node is not None:
5258 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
5259 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
5260 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
5261 a9e0c397 Iustin Pop
    else:
5262 a9e0c397 Iustin Pop
      self.remote_node_info = None
5263 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
5264 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
5265 3ecf6786 Iustin Pop
                                 " the instance.")
5266 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
5267 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
5268 7e9366f7 Iustin Pop
                                 " secondary node of the instance.")
5269 7e9366f7 Iustin Pop
5270 7e9366f7 Iustin Pop
    if self.op.mode == constants.REPLACE_DISK_PRI:
5271 7e9366f7 Iustin Pop
      n1 = self.tgt_node = instance.primary_node
5272 7e9366f7 Iustin Pop
      n2 = self.oth_node = self.sec_node
5273 7e9366f7 Iustin Pop
    elif self.op.mode == constants.REPLACE_DISK_SEC:
5274 7e9366f7 Iustin Pop
      n1 = self.tgt_node = self.sec_node
5275 7e9366f7 Iustin Pop
      n2 = self.oth_node = instance.primary_node
5276 7e9366f7 Iustin Pop
    elif self.op.mode == constants.REPLACE_DISK_CHG:
5277 7e9366f7 Iustin Pop
      n1 = self.new_node = remote_node
5278 7e9366f7 Iustin Pop
      n2 = self.oth_node = instance.primary_node
5279 7e9366f7 Iustin Pop
      self.tgt_node = self.sec_node
5280 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, remote_node)
5281 7e9366f7 Iustin Pop
    else:
5282 7e9366f7 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replace mode")
5283 7e9366f7 Iustin Pop
5284 7e9366f7 Iustin Pop
    _CheckNodeOnline(self, n1)
5285 7e9366f7 Iustin Pop
    _CheckNodeOnline(self, n2)
5286 a9e0c397 Iustin Pop
5287 54155f52 Iustin Pop
    if not self.op.disks:
5288 54155f52 Iustin Pop
      self.op.disks = range(len(instance.disks))
5289 54155f52 Iustin Pop
5290 54155f52 Iustin Pop
    for disk_idx in self.op.disks:
5291 3e0cea06 Iustin Pop
      instance.FindDisk(disk_idx)
5292 a8083063 Iustin Pop
5293 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
5294 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
5295 a9e0c397 Iustin Pop

5296 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5297 e4376078 Iustin Pop

5298 e4376078 Iustin Pop
      1. for each disk to be replaced:
5299 e4376078 Iustin Pop

5300 e4376078 Iustin Pop
        1. create new LVs on the target node with unique names
5301 e4376078 Iustin Pop
        1. detach old LVs from the drbd device
5302 e4376078 Iustin Pop
        1. rename old LVs to name_replaced.<time_t>
5303 e4376078 Iustin Pop
        1. rename new LVs to old LVs
5304 e4376078 Iustin Pop
        1. attach the new LVs (with the old names now) to the drbd device
5305 e4376078 Iustin Pop

5306 e4376078 Iustin Pop
      1. wait for sync across all devices
5307 e4376078 Iustin Pop

5308 e4376078 Iustin Pop
      1. for each modified disk:
5309 e4376078 Iustin Pop

5310 e4376078 Iustin Pop
        1. remove old LVs (which have the name name_replaces.<time_t>)
5311 a9e0c397 Iustin Pop

5312 a9e0c397 Iustin Pop
    Failures are not very well handled.
5313 cff90b79 Iustin Pop

5314 a9e0c397 Iustin Pop
    """
5315 cff90b79 Iustin Pop
    steps_total = 6
5316 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5317 a9e0c397 Iustin Pop
    instance = self.instance
5318 a9e0c397 Iustin Pop
    iv_names = {}
5319 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
5320 a9e0c397 Iustin Pop
    # start of work
5321 a9e0c397 Iustin Pop
    cfg = self.cfg
5322 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
5323 cff90b79 Iustin Pop
    oth_node = self.oth_node
5324 cff90b79 Iustin Pop
5325 cff90b79 Iustin Pop
    # Step: check device activation
5326 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
5327 cff90b79 Iustin Pop
    info("checking volume groups")
5328 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
5329 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([oth_node, tgt_node])
5330 cff90b79 Iustin Pop
    if not results:
5331 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
5332 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
5333 781de953 Iustin Pop
      res = results[node]
5334 781de953 Iustin Pop
      if res.failed or not res.data or my_vg not in res.data:
5335 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5336 cff90b79 Iustin Pop
                                 (my_vg, node))
5337 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5338 54155f52 Iustin Pop
      if idx not in self.op.disks:
5339 cff90b79 Iustin Pop
        continue
5340 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
5341 54155f52 Iustin Pop
        info("checking disk/%d on %s" % (idx, node))
5342 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
5343 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
5344 23829f6f Iustin Pop
        msg = result.RemoteFailMsg()
5345 23829f6f Iustin Pop
        if not msg and not result.payload:
5346 23829f6f Iustin Pop
          msg = "disk not found"
5347 23829f6f Iustin Pop
        if msg:
5348 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5349 23829f6f Iustin Pop
                                   (idx, node, msg))
5350 cff90b79 Iustin Pop
5351 cff90b79 Iustin Pop
    # Step: check other node consistency
5352 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
5353 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5354 54155f52 Iustin Pop
      if idx not in self.op.disks:
5355 cff90b79 Iustin Pop
        continue
5356 54155f52 Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, oth_node))
5357 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, oth_node,
5358 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
5359 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
5360 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
5361 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
5362 cff90b79 Iustin Pop
5363 cff90b79 Iustin Pop
    # Step: create new storage
5364 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
5365 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5366 54155f52 Iustin Pop
      if idx not in self.op.disks:
5367 a9e0c397 Iustin Pop
        continue
5368 a9e0c397 Iustin Pop
      size = dev.size
5369 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
5370 54155f52 Iustin Pop
      lv_names = [".disk%d_%s" % (idx, suf)
5371 54155f52 Iustin Pop
                  for suf in ["data", "meta"]]
5372 b9bddb6b Iustin Pop
      names = _GenerateUniqueNames(self, lv_names)
5373 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5374 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
5375 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5376 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
5377 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
5378 a9e0c397 Iustin Pop
      old_lvs = dev.children
5379 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
5380 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
5381 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
5382 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
5383 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
5384 428958aa Iustin Pop
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
5385 428958aa Iustin Pop
                        _GetInstanceInfoText(instance), False)
5386 a9e0c397 Iustin Pop
5387 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
5388 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
5389 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
5390 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
5391 781de953 Iustin Pop
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
5392 781de953 Iustin Pop
      result.Raise()
5393 781de953 Iustin Pop
      if not result.data:
5394 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
5395 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
5396 cff90b79 Iustin Pop
      #dev.children = []
5397 cff90b79 Iustin Pop
      #cfg.Update(instance)
5398 a9e0c397 Iustin Pop
5399 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
5400 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
5401 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
5402 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
5403 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
5404 cff90b79 Iustin Pop
5405 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
5406 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
5407 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
5408 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
5409 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
5410 cff90b79 Iustin Pop
      rlist = []
5411 cff90b79 Iustin Pop
      for to_ren in old_lvs:
5412 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
5413 23829f6f Iustin Pop
        if not result.RemoteFailMsg() and result.payload:
5414 23829f6f Iustin Pop
          # device exists
5415 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
5416 cff90b79 Iustin Pop
5417 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
5418 781de953 Iustin Pop
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5419 781de953 Iustin Pop
      result.Raise()
5420 781de953 Iustin Pop
      if not result.data:
5421 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
5422 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
5423 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
5424 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
5425 781de953 Iustin Pop
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5426 781de953 Iustin Pop
      result.Raise()
5427 781de953 Iustin Pop
      if not result.data:
5428 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
5429 cff90b79 Iustin Pop
5430 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
5431 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
5432 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
5433 a9e0c397 Iustin Pop
5434 cff90b79 Iustin Pop
      for disk in old_lvs:
5435 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
5436 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
5437 a9e0c397 Iustin Pop
5438 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
5439 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
5440 4504c3d6 Iustin Pop
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
5441 781de953 Iustin Pop
      if result.failed or not result.data:
5442 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
5443 e1bc0878 Iustin Pop
          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
5444 e1bc0878 Iustin Pop
          if msg:
5445 e1bc0878 Iustin Pop
            warning("Can't rollback device %s: %s", dev, msg,
5446 e1bc0878 Iustin Pop
                    hint="cleanup manually the unused logical volumes")
5447 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
5448 a9e0c397 Iustin Pop
5449 a9e0c397 Iustin Pop
      dev.children = new_lvs
5450 a9e0c397 Iustin Pop
      cfg.Update(instance)
5451 a9e0c397 Iustin Pop
5452 cff90b79 Iustin Pop
    # Step: wait for sync
5453 a9e0c397 Iustin Pop
5454 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
5455 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
5456 a9e0c397 Iustin Pop
    # return value
5457 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
5458 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
5459 a9e0c397 Iustin Pop
5460 a9e0c397 Iustin Pop
    # so check manually all the devices
5461 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5462 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
5463 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
5464 23829f6f Iustin Pop
      msg = result.RemoteFailMsg()
5465 23829f6f Iustin Pop
      if not msg and not result.payload:
5466 23829f6f Iustin Pop
        msg = "disk not found"
5467 23829f6f Iustin Pop
      if msg:
5468 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
5469 23829f6f Iustin Pop
                                 (name, msg))
5470 23829f6f Iustin Pop
      if result.payload[5]:
5471 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
5472 a9e0c397 Iustin Pop
5473 cff90b79 Iustin Pop
    # Step: remove old storage
5474 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
5475 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5476 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
5477 a9e0c397 Iustin Pop
      for lv in old_lvs:
5478 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
5479 e1bc0878 Iustin Pop
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
5480 e1bc0878 Iustin Pop
        if msg:
5481 e1bc0878 Iustin Pop
          warning("Can't remove old LV: %s" % msg,
5482 e1bc0878 Iustin Pop
                  hint="manually remove unused LVs")
5483 a9e0c397 Iustin Pop
          continue
5484 a9e0c397 Iustin Pop
5485 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
5486 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
5487 a9e0c397 Iustin Pop

5488 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5489 a9e0c397 Iustin Pop
      - for all disks of the instance:
5490 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
5491 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
5492 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
5493 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
5494 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
5495 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
5496 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
5497 a9e0c397 Iustin Pop
          not network enabled
5498 a9e0c397 Iustin Pop
      - wait for sync across all devices
5499 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
5500 a9e0c397 Iustin Pop

5501 a9e0c397 Iustin Pop
    Failures are not very well handled.
5502 0834c866 Iustin Pop

5503 a9e0c397 Iustin Pop
    """
5504 0834c866 Iustin Pop
    steps_total = 6
5505 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5506 a9e0c397 Iustin Pop
    instance = self.instance
5507 a9e0c397 Iustin Pop
    iv_names = {}
5508 a9e0c397 Iustin Pop
    # start of work
5509 a9e0c397 Iustin Pop
    cfg = self.cfg
5510 a9e0c397 Iustin Pop
    old_node = self.tgt_node
5511 a9e0c397 Iustin Pop
    new_node = self.new_node
5512 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
5513 a2d59d8b Iustin Pop
    nodes_ip = {
5514 a2d59d8b Iustin Pop
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
5515 a2d59d8b Iustin Pop
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
5516 a2d59d8b Iustin Pop
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
5517 a2d59d8b Iustin Pop
      }
5518 0834c866 Iustin Pop
5519 0834c866 Iustin Pop
    # Step: check device activation
5520 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
5521 0834c866 Iustin Pop
    info("checking volume groups")
5522 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
5523 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
5524 0834c866 Iustin Pop
    for node in pri_node, new_node:
5525 781de953 Iustin Pop
      res = results[node]
5526 781de953 Iustin Pop
      if res.failed or not res.data or my_vg not in res.data:
5527 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5528 0834c866 Iustin Pop
                                 (my_vg, node))
5529 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5530 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5531 0834c866 Iustin Pop
        continue
5532 d418ebfb Iustin Pop
      info("checking disk/%d on %s" % (idx, pri_node))
5533 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5534 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5535 23829f6f Iustin Pop
      msg = result.RemoteFailMsg()
5536 23829f6f Iustin Pop
      if not msg and not result.payload:
5537 23829f6f Iustin Pop
        msg = "disk not found"
5538 23829f6f Iustin Pop
      if msg:
5539 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5540 23829f6f Iustin Pop
                                 (idx, pri_node, msg))
5541 0834c866 Iustin Pop
5542 0834c866 Iustin Pop
    # Step: check other node consistency
5543 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
5544 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5545 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5546 0834c866 Iustin Pop
        continue
5547 d418ebfb Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, pri_node))
5548 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
5549 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
5550 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
5551 0834c866 Iustin Pop
                                 pri_node)
5552 0834c866 Iustin Pop
5553 0834c866 Iustin Pop
    # Step: create new storage
5554 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
5555 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5556 d418ebfb Iustin Pop
      info("adding new local storage on %s for disk/%d" %
5557 d418ebfb Iustin Pop
           (new_node, idx))
5558 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
5559 a9e0c397 Iustin Pop
      for new_lv in dev.children:
5560 428958aa Iustin Pop
        _CreateBlockDev(self, new_node, instance, new_lv, True,
5561 428958aa Iustin Pop
                        _GetInstanceInfoText(instance), False)
5562 a9e0c397 Iustin Pop
5563 468b46f9 Iustin Pop
    # Step 4: dbrd minors and drbd setups changes
5564 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
5565 a1578d63 Iustin Pop
    # error and the success paths
5566 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
5567 a1578d63 Iustin Pop
                                   instance.name)
5568 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
5569 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
5570 d418ebfb Iustin Pop
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
5571 0834c866 Iustin Pop
      size = dev.size
5572 d418ebfb Iustin Pop
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
5573 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
5574 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
5575 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
5576 a2d59d8b Iustin Pop
      # with network, for the latter activation in step 4
5577 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5578 a2d59d8b Iustin Pop
      if pri_node == o_node1:
5579 a2d59d8b Iustin Pop
        p_minor = o_minor1
5580 ffa1c0dc Iustin Pop
      else:
5581 a2d59d8b Iustin Pop
        p_minor = o_minor2
5582 a2d59d8b Iustin Pop
5583 a2d59d8b Iustin Pop
      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
5584 a2d59d8b Iustin Pop
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
5585 a2d59d8b Iustin Pop
5586 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
5587 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5588 a2d59d8b Iustin Pop
                    new_net_id)
5589 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5590 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
5591 8a6c7011 Iustin Pop
                              children=dev.children,
5592 8a6c7011 Iustin Pop
                              size=dev.size)
5593 796cab27 Iustin Pop
      try:
5594 de12473a Iustin Pop
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
5595 de12473a Iustin Pop
                              _GetInstanceInfoText(instance), False)
5596 82759cb1 Iustin Pop
      except errors.GenericError:
5597 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5598 796cab27 Iustin Pop
        raise
5599 a9e0c397 Iustin Pop
5600 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5601 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
5602 d418ebfb Iustin Pop
      info("shutting down drbd for disk/%d on old node" % idx)
5603 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
5604 cacfd1fd Iustin Pop
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
5605 cacfd1fd Iustin Pop
      if msg:
5606 cacfd1fd Iustin Pop
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
5607 cacfd1fd Iustin Pop
                (idx, msg),
5608 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
5609 a9e0c397 Iustin Pop
5610 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
5611 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
5612 a2d59d8b Iustin Pop
                                               instance.disks)[pri_node]
5613 642445d9 Iustin Pop
5614 a2d59d8b Iustin Pop
    msg = result.RemoteFailMsg()
5615 a2d59d8b Iustin Pop
    if msg:
5616 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
5617 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
5618 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
5619 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
5620 642445d9 Iustin Pop
5621 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
5622 642445d9 Iustin Pop
    # the instance to point to the new secondary
5623 642445d9 Iustin Pop
    info("updating instance configuration")
5624 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
5625 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
5626 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5627 642445d9 Iustin Pop
    cfg.Update(instance)
5628 a9e0c397 Iustin Pop
5629 642445d9 Iustin Pop
    # and now perform the drbd attach
5630 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
5631 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
5632 a2d59d8b Iustin Pop
                                           instance.disks, instance.name,
5633 a2d59d8b Iustin Pop
                                           False)
5634 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
5635 a2d59d8b Iustin Pop
      msg = to_result.RemoteFailMsg()
5636 a2d59d8b Iustin Pop
      if msg:
5637 a2d59d8b Iustin Pop
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
5638 a2d59d8b Iustin Pop
                hint="please do a gnt-instance info to see the"
5639 a2d59d8b Iustin Pop
                " status of disks")
5640 a9e0c397 Iustin Pop
5641 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
5642 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
5643 a9e0c397 Iustin Pop
    # return value
5644 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
5645 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
5646 a9e0c397 Iustin Pop
5647 a9e0c397 Iustin Pop
    # so check manually all the devices
5648 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5649 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5650 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5651 23829f6f Iustin Pop
      msg = result.RemoteFailMsg()
5652 23829f6f Iustin Pop
      if not msg and not result.payload:
5653 23829f6f Iustin Pop
        msg = "disk not found"
5654 23829f6f Iustin Pop
      if msg:
5655 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
5656 23829f6f Iustin Pop
                                 (idx, msg))
5657 23829f6f Iustin Pop
      if result.payload[5]:
5658 d418ebfb Iustin Pop
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
5659 a9e0c397 Iustin Pop
5660 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
5661 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5662 d418ebfb Iustin Pop
      info("remove logical volumes for disk/%d" % idx)
5663 a9e0c397 Iustin Pop
      for lv in old_lvs:
5664 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
5665 e1bc0878 Iustin Pop
        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
5666 e1bc0878 Iustin Pop
        if msg:
5667 e1bc0878 Iustin Pop
          warning("Can't remove LV on old secondary: %s", msg,
5668 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
5669 a9e0c397 Iustin Pop
5670 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    instance = self.instance

    # Activate the instance disks if we're replacing them on a down instance
    if not instance.admin_up:
      _StartInstanceDisks(self, instance, True)

    # REPLACE_DISK_CHG means changing the secondary node; anything else
    # is an in-place replacement of the disks only
    if self.op.mode == constants.REPLACE_DISK_CHG:
      handler = self._ExecD8Secondary
    else:
      handler = self._ExecD8DiskOnly

    result = handler(feedback_fn)

    # Deactivate the instance disks if we're replacing them on a down instance
    if not instance.admin_up:
      _SafeShutdownInstanceDisks(self, instance)

    return result
5694 a9e0c397 Iustin Pop
5695 a8083063 Iustin Pop
5696 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    """Lock the instance; node locks are computed later via replacement."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the locks on the instance's nodes at the node level."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    node_names = list(instance.all_nodes)
    for node_name in node_names:
      _CheckNodeOnline(self, node_name)

    self.instance = instance

    # only LVM-backed templates can be resized in place
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)

    # every involved node must report enough free space in its volume group
    nodeinfo = self.rpc.call_node_info(node_names, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node_name in node_names:
      node_data = nodeinfo[node_name]
      if node_data.failed or not node_data.data:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node_name)
      vg_free = node_data.data.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node_name)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node_name, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk
    # the grow must succeed on every node holding the device before the
    # new size is recorded in the configuration
    for node_name in instance.all_nodes:
      self.cfg.SetDiskID(disk, node_name)
      result = self.rpc.call_blockdev_grow(node_name, disk, self.op.amount)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Grow request failed to node %s: %s" %
                                 (node_name, msg))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      if not _WaitForSync(self, instance):
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
5789 8729e0d7 Iustin Pop
5790 8729e0d7 Iustin Pop
5791 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the needed locks; all levels are shared."""
    self.needed_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # no explicit instance list means "query all instances"
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the nodes of the selected instances."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" case: use whatever instance locks we acquired
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name)
                             for name in self.wanted_names]

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Recursively computes the status of the device and its children on
    the primary (and, for DRBD, the secondary) node.

    """
    static = self.op.static

    def _DevStatus(node):
      # query one node for the live device status; an offline node
      # yields None instead of an error
      self.cfg.SetDiskID(dev, node)
      status = self.rpc.call_blockdev_find(node, dev)
      if status.offline:
        return None
      msg = status.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                 (instance.name, msg))
      return status.payload

    if static:
      dev_pstatus = None
    else:
      dev_pstatus = _DevStatus(instance.primary_node)

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      dev_sstatus = _DevStatus(snode)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      remote_state = None
      if not self.op.static:
        # live query: ask the primary node whether the instance is running
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise()
        remote_info = remote_info.data
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"

      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      result[instance.name] = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

    return result
5944 a8083063 Iustin Pop
5945 a8083063 Iustin Pop
5946 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
5947 a8083063 Iustin Pop
  """Modifies an instances's parameters.
5948 a8083063 Iustin Pop

5949 a8083063 Iustin Pop
  """
5950 a8083063 Iustin Pop
  HPATH = "instance-modify"
5951 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5952 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
5953 1a5c7281 Guido Trotter
  REQ_BGL = False
5954 1a5c7281 Guido Trotter
5955 24991749 Iustin Pop
  def CheckArguments(self):
5956 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
5957 24991749 Iustin Pop
      self.op.nics = []
5958 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
5959 24991749 Iustin Pop
      self.op.disks = []
5960 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
5961 24991749 Iustin Pop
      self.op.beparams = {}
5962 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
5963 24991749 Iustin Pop
      self.op.hvparams = {}
5964 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
5965 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
5966 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
5967 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
5968 24991749 Iustin Pop
5969 24991749 Iustin Pop
    # Disk validation
5970 24991749 Iustin Pop
    disk_addremove = 0
5971 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5972 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5973 24991749 Iustin Pop
        disk_addremove += 1
5974 24991749 Iustin Pop
        continue
5975 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
5976 24991749 Iustin Pop
        disk_addremove += 1
5977 24991749 Iustin Pop
      else:
5978 24991749 Iustin Pop
        if not isinstance(disk_op, int):
5979 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
5980 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
5981 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
5982 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
5983 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
5984 24991749 Iustin Pop
        size = disk_dict.get('size', None)
5985 24991749 Iustin Pop
        if size is None:
5986 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
5987 24991749 Iustin Pop
        try:
5988 24991749 Iustin Pop
          size = int(size)
5989 24991749 Iustin Pop
        except ValueError, err:
5990 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
5991 24991749 Iustin Pop
                                     str(err))
5992 24991749 Iustin Pop
        disk_dict['size'] = size
5993 24991749 Iustin Pop
      else:
5994 24991749 Iustin Pop
        # modification of disk
5995 24991749 Iustin Pop
        if 'size' in disk_dict:
5996 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
5997 24991749 Iustin Pop
                                     " grow-disk")
5998 24991749 Iustin Pop
5999 24991749 Iustin Pop
    if disk_addremove > 1:
6000 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
6001 24991749 Iustin Pop
                                 " supported at a time")
6002 24991749 Iustin Pop
6003 24991749 Iustin Pop
    # NIC validation
6004 24991749 Iustin Pop
    nic_addremove = 0
6005 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6006 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6007 24991749 Iustin Pop
        nic_addremove += 1
6008 24991749 Iustin Pop
        continue
6009 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
6010 24991749 Iustin Pop
        nic_addremove += 1
6011 24991749 Iustin Pop
      else:
6012 24991749 Iustin Pop
        if not isinstance(nic_op, int):
6013 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
6014 24991749 Iustin Pop
6015 24991749 Iustin Pop
      # nic_dict should be a dict
6016 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
6017 24991749 Iustin Pop
      if nic_ip is not None:
6018 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
6019 24991749 Iustin Pop
          nic_dict['ip'] = None
6020 24991749 Iustin Pop
        else:
6021 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
6022 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
6023 5c44da6a Guido Trotter
6024 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
6025 5c44da6a Guido Trotter
        nic_bridge = nic_dict.get('bridge', None)
6026 5c44da6a Guido Trotter
        if nic_bridge is None:
6027 5c44da6a Guido Trotter
          nic_dict['bridge'] = self.cfg.GetDefBridge()
6028 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
6029 5c44da6a Guido Trotter
        if nic_mac is None:
6030 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
6031 5c44da6a Guido Trotter
6032 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6033 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6034 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6035 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
6036 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
6037 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
6038 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
6039 5c44da6a Guido Trotter
                                     " modifying an existing nic")
6040 5c44da6a Guido Trotter
6041 24991749 Iustin Pop
    if nic_addremove > 1:
6042 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
6043 24991749 Iustin Pop
                                 " supported at a time")
6044 24991749 Iustin Pop
6045 1a5c7281 Guido Trotter
  def ExpandNames(self):
    """Lock the instance; its node locks are recalculated afterwards."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6049 74409b12 Iustin Pop
6050 74409b12 Iustin Pop
  def DeclareLocks(self, level):
    """Acquire the locks on the instance's nodes at the node level."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
6053 a8083063 Iustin Pop
6054 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      args['nics'] = []
      nic_override = dict(self.op.nics)
      for idx, nic in enumerate(self.instance.nics):
        # per-NIC override values take precedence over the current config
        override = nic_override.get(idx, {})
        args['nics'].append((override.get('ip', nic.ip),
                             override.get('bridge', nic.bridge),
                             override.get('mac', nic.mac)))
      if constants.DDM_ADD in nic_override:
        add_nic = nic_override[constants.DDM_ADD]
        args['nics'].append((add_nic.get('ip', None),
                             add_nic['bridge'],
                             add_nic['mac']))
      elif constants.DDM_REMOVE in nic_override:
        # a removal drops the last NIC from the exported list
        del args['nics'][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl
6099 a8083063 Iustin Pop
6100 a8083063 Iustin Pop
  def CheckPrereq(self):
6101 a8083063 Iustin Pop
    """Check prerequisites.
6102 a8083063 Iustin Pop

6103 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
6104 a8083063 Iustin Pop

6105 a8083063 Iustin Pop
    """
6106 24991749 Iustin Pop
    force = self.force = self.op.force
6107 a8083063 Iustin Pop
6108 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
6109 31a853d2 Iustin Pop
6110 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6111 1a5c7281 Guido Trotter
    assert self.instance is not None, \
6112 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6113 6b12959c Iustin Pop
    pnode = instance.primary_node
6114 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
6115 74409b12 Iustin Pop
6116 338e51e8 Iustin Pop
    # hvparams processing
6117 74409b12 Iustin Pop
    if self.op.hvparams:
6118 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
6119 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
6120 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
6121 74409b12 Iustin Pop
          try:
6122 74409b12 Iustin Pop
            del i_hvdict[key]
6123 74409b12 Iustin Pop
          except KeyError:
6124 74409b12 Iustin Pop
            pass
6125 74409b12 Iustin Pop
        else:
6126 74409b12 Iustin Pop
          i_hvdict[key] = val
6127 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
6128 a5728081 Guido Trotter
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
6129 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
6130 74409b12 Iustin Pop
                                i_hvdict)
6131 74409b12 Iustin Pop
      # local check
6132 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
6133 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
6134 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
6135 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
6136 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
6137 338e51e8 Iustin Pop
    else:
6138 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
6139 338e51e8 Iustin Pop
6140 338e51e8 Iustin Pop
    # beparams processing
6141 338e51e8 Iustin Pop
    if self.op.beparams:
6142 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
6143 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
6144 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
6145 338e51e8 Iustin Pop
          try:
6146 338e51e8 Iustin Pop
            del i_bedict[key]
6147 338e51e8 Iustin Pop
          except KeyError:
6148 338e51e8 Iustin Pop
            pass
6149 338e51e8 Iustin Pop
        else:
6150 338e51e8 Iustin Pop
          i_bedict[key] = val
6151 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
6152 a5728081 Guido Trotter
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
6153 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
6154 338e51e8 Iustin Pop
                                i_bedict)
6155 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
6156 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
6157 338e51e8 Iustin Pop
    else:
6158 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
6159 74409b12 Iustin Pop
6160 cfefe007 Guido Trotter
    self.warn = []
6161 647a5d80 Iustin Pop
6162 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
6163 647a5d80 Iustin Pop
      mem_check_list = [pnode]
6164 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6165 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
6166 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
6167 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
6168 72737a7f Iustin Pop
                                                  instance.hypervisor)
6169 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
6170 72737a7f Iustin Pop
                                         instance.hypervisor)
6171 781de953 Iustin Pop
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
6172 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
6173 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
6174 cfefe007 Guido Trotter
      else:
6175 781de953 Iustin Pop
        if not instance_info.failed and instance_info.data:
6176 ade0e8cd Guido Trotter
          current_mem = int(instance_info.data['memory'])
6177 cfefe007 Guido Trotter
        else:
6178 cfefe007 Guido Trotter
          # Assume instance not running
6179 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
6180 cfefe007 Guido Trotter
          # and we have no other way to check)
6181 cfefe007 Guido Trotter
          current_mem = 0
6182 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
6183 781de953 Iustin Pop
                    nodeinfo[pnode].data['memory_free'])
6184 cfefe007 Guido Trotter
        if miss_mem > 0:
6185 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
6186 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
6187 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
6188 cfefe007 Guido Trotter
6189 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6190 ea33068f Iustin Pop
        for node, nres in nodeinfo.iteritems():
6191 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
6192 ea33068f Iustin Pop
            continue
6193 781de953 Iustin Pop
          if nres.failed or not isinstance(nres.data, dict):
6194 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
6195 781de953 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
6196 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
6197 647a5d80 Iustin Pop
                             " secondary node %s" % node)
6198 5bc84f33 Alexander Schreiber
6199 24991749 Iustin Pop
    # NIC processing
6200 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6201 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6202 24991749 Iustin Pop
        if not instance.nics:
6203 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
6204 24991749 Iustin Pop
        continue
6205 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
6206 24991749 Iustin Pop
        # an existing nic
6207 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
6208 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
6209 24991749 Iustin Pop
                                     " are 0 to %d" %
6210 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
6211 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
6212 5c44da6a Guido Trotter
        nic_bridge = nic_dict['bridge']
6213 5c44da6a Guido Trotter
        if nic_bridge is None:
6214 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic bridge to None')
6215 24991749 Iustin Pop
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
6216 24991749 Iustin Pop
          msg = ("Bridge '%s' doesn't exist on one of"
6217 24991749 Iustin Pop
                 " the instance nodes" % nic_bridge)
6218 24991749 Iustin Pop
          if self.force:
6219 24991749 Iustin Pop
            self.warn.append(msg)
6220 24991749 Iustin Pop
          else:
6221 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
6222 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6223 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6224 5c44da6a Guido Trotter
        if nic_mac is None:
6225 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic mac to None')
6226 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6227 5c44da6a Guido Trotter
          # otherwise generate the mac
6228 5c44da6a Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC()
6229 5c44da6a Guido Trotter
        else:
6230 5c44da6a Guido Trotter
          # or validate/reserve the current one
6231 5c44da6a Guido Trotter
          if self.cfg.IsMacInUse(nic_mac):
6232 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
6233 5c44da6a Guido Trotter
                                       " in cluster" % nic_mac)
6234 24991749 Iustin Pop
6235 24991749 Iustin Pop
    # DISK processing
6236 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
6237 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
6238 24991749 Iustin Pop
                                 " diskless instances")
6239 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6240 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6241 24991749 Iustin Pop
        if len(instance.disks) == 1:
6242 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
6243 24991749 Iustin Pop
                                     " an instance")
6244 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
6245 24991749 Iustin Pop
        ins_l = ins_l[pnode]
6246 4cfb9426 Iustin Pop
        if ins_l.failed or not isinstance(ins_l.data, list):
6247 24991749 Iustin Pop
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
6248 4cfb9426 Iustin Pop
        if instance.name in ins_l.data:
6249 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
6250 24991749 Iustin Pop
                                     " disks.")
6251 24991749 Iustin Pop
6252 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
6253 24991749 Iustin Pop
          len(instance.nics) >= constants.MAX_DISKS):
6254 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
6255 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
6256 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
6257 24991749 Iustin Pop
        # an existing disk
6258 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
6259 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
6260 24991749 Iustin Pop
                                     " are 0 to %d" %
6261 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
6262 24991749 Iustin Pop
6263 a8083063 Iustin Pop
    return
6264 a8083063 Iustin Pop
6265 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    # list of (parameter name, new value) pairs, returned as job feedback
    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk; CheckPrereq has already verified this is
        # not the only disk and that the instance is not running
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
          if msg:
            # removal failures are non-fatal: the disk is already gone
            # from the instance configuration
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          # file-based disks share the directory of the first disk
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            # creation failure on a node is only a warning; the new disk
            # is already registered in the configuration at this point
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk: only the access mode can be modified
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set, by now (filled in CheckPrereq)
        mac = nic_dict['mac']
        bridge = nic_dict['bridge']
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
                              bridge=bridge)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,bridge=%s" %
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
      else:
        # change a given nic, attribute by attribute
        for key in 'mac', 'ip', 'bridge':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))

    # hvparams changes; self.hv_inst was computed in CheckPrereq and is
    # the new hypervisor parameter dict without the cluster defaults
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes; same as above, self.be_inst is default-free
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    self.cfg.Update(instance)

    return result
6367 a8083063 Iustin Pop
6368 a8083063 Iustin Pop
6369 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the needed node locks (shared).

    """
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      wanted = _GetWantedNodes(self, self.op.nodes)
    else:
      # an empty node list means "query all nodes"
      wanted = locking.ALL_SET
    self.needed_locks[locking.LEVEL_NODE] = wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node in rpcresult:
      nres = rpcresult[node]
      # a node which failed to answer is marked with False instead of
      # its export list
      if nres.failed:
        result[node] = False
      else:
        result[node] = nres.data
    return result
6409 a8083063 Iustin Pop
6410 a8083063 Iustin Pop
6411 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and compute the needed locks.

    """
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know
    # where the previous export might be, and in this LU we search for it
    # and remove it from its current node. In the future we could fix this
    # by:
    #  - making a tasklet to search (share-lock all), then create the new
    #    one, then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)
    _CheckNodeNotDrained(self, self.dst_node.name)

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    @return: a tuple of (status of the finalize operation, list of
        per-disk export status booleans)

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, src_node, msg))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    # per-disk results
    dresults = []
    try:
      for idx, disk in enumerate(instance.disks):
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if new_dev_name.failed or not new_dev_name.data:
          self.LogWarning("Could not snapshot disk/%d on node %s",
                          idx, src_node)
          # record the failure as False so disk indices stay aligned
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name.data),
                                 physical_id=(vgname, new_dev_name.data),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      # restart the instance if it was running before; on restart failure
      # make sure its disks are shut down again before aborting
      if self.op.shutdown and instance.admin_up:
        result = self.rpc.call_instance_start(src_node, instance, None, None)
        msg = result.RemoteFailMsg()
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        if result.failed or not result.data:
          self.LogWarning("Could not export disk/%d from node %s to"
                          " node %s", idx, src_node, dst_node.name)
          dresults.append(False)
        else:
          dresults.append(True)
        # the snapshot is no longer needed, whether the export worked or not
        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
        if msg:
          self.LogWarning("Could not remove snapshot for disk/%d from node"
                          " %s: %s", idx, src_node, msg)
      else:
        dresults.append(False)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    fin_resu = True
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)
      fin_resu = False

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          # examine the rpc result (.failed/.data) as every other call
          # site does, instead of the result object's own truthiness
          remove_result = self.rpc.call_export_remove(node, instance.name)
          if remove_result.failed or not remove_result.data:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)
    return fin_resu, dresults
6571 5c947f38 Iustin Pop
6572 5c947f38 Iustin Pop
6573 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      nres = exportlist[node]
      if nres.failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if instance_name not in nres.data:
        continue
      found = True
      result = self.rpc.call_export_remove(node, instance_name)
      if result.failed or not result.data:
        logging.error("Could not remove export for instance %s"
                      " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
                  " Domain Name.")
6622 9ac99fda Guido Trotter
6623 9ac99fda Guido Trotter
6624 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    """Expand the target name and compute the needed locks."""
    self.needed_locks = {}
    kind = self.op.kind
    if kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded

  def CheckPrereq(self):
    """Check prerequisites.

    This resolves self.target to the object which holds the tags.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
    self.target = target
6661 5c947f38 Iustin Pop
6662 5c947f38 Iustin Pop
6663 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Return the tags of a given cluster object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Return the target's tags as a list.

    """
    # self.target was resolved by TagsLU.CheckPrereq
    return [tag for tag in self.target.GetTags()]
6675 5c947f38 Iustin Pop
6676 5c947f38 Iustin Pop
6677 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
6678 73415719 Iustin Pop
  """Searches the tags for a given pattern.
6679 73415719 Iustin Pop

6680 73415719 Iustin Pop
  """
6681 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
6682 8646adce Guido Trotter
  REQ_BGL = False
6683 8646adce Guido Trotter
6684 8646adce Guido Trotter
  def ExpandNames(self):
6685 8646adce Guido Trotter
    self.needed_locks = {}
6686 73415719 Iustin Pop
6687 73415719 Iustin Pop
  def CheckPrereq(self):
6688 73415719 Iustin Pop
    """Check prerequisites.
6689 73415719 Iustin Pop

6690 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
6691 73415719 Iustin Pop

6692 73415719 Iustin Pop
    """
6693 73415719 Iustin Pop
    try:
6694 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
6695 73415719 Iustin Pop
    except re.error, err:
6696 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6697 73415719 Iustin Pop
                                 (self.op.pattern, err))
6698 73415719 Iustin Pop
6699 73415719 Iustin Pop
  def Exec(self, feedback_fn):
6700 73415719 Iustin Pop
    """Returns the tag list.
6701 73415719 Iustin Pop

6702 73415719 Iustin Pop
    """
6703 73415719 Iustin Pop
    cfg = self.cfg
6704 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
6705 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
6706 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6707 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
6708 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6709 73415719 Iustin Pop
    results = []
6710 73415719 Iustin Pop
    for path, target in tgts:
6711 73415719 Iustin Pop
      for tag in target.GetTags():
6712 73415719 Iustin Pop
        if self.re.search(tag):
6713 73415719 Iustin Pop
          results.append((path, tag))
6714 73415719 Iustin Pop
    return results
6715 73415719 Iustin Pop
6716 73415719 Iustin Pop
6717 f27302fa Iustin Pop
class LUAddTags(TagsLU):
6718 5c947f38 Iustin Pop
  """Sets a tag on a given object.
6719 5c947f38 Iustin Pop

6720 5c947f38 Iustin Pop
  """
6721 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6722 8646adce Guido Trotter
  REQ_BGL = False
6723 5c947f38 Iustin Pop
6724 5c947f38 Iustin Pop
  def CheckPrereq(self):
6725 5c947f38 Iustin Pop
    """Check prerequisites.
6726 5c947f38 Iustin Pop

6727 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
6728 5c947f38 Iustin Pop

6729 5c947f38 Iustin Pop
    """
6730 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6731 f27302fa Iustin Pop
    for tag in self.op.tags:
6732 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6733 5c947f38 Iustin Pop
6734 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6735 5c947f38 Iustin Pop
    """Sets the tag.
6736 5c947f38 Iustin Pop

6737 5c947f38 Iustin Pop
    """
6738 5c947f38 Iustin Pop
    try:
6739 f27302fa Iustin Pop
      for tag in self.op.tags:
6740 f27302fa Iustin Pop
        self.target.AddTag(tag)
6741 5c947f38 Iustin Pop
    except errors.TagError, err:
6742 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6743 5c947f38 Iustin Pop
    try:
6744 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6745 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6746 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6747 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6748 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6749 5c947f38 Iustin Pop
6750 5c947f38 Iustin Pop
6751 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    Validates the tags and verifies that every one of them is
    currently set on the target object.

    """
    TagsLU.CheckPrereq(self)
    for wanted_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(wanted_tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    # every tag to be removed must currently be present
    missing = del_tags - cur_tags
    if missing:
      diff_names = sorted("'%s'" % tag for tag in missing)
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tags from the object and persist the configuration.

    """
    for old_tag in self.op.tags:
      self.target.RemoveTag(old_tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
6788 06009e27 Iustin Pop
6789 0eed6e61 Guido Trotter
6790 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if not self.op.on_nodes:
      return
    # _GetWantedNodes can be used here, but is not always appropriate to use
    # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
    # more information.
    self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
    self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      # sleep locally on the master daemon
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      # sleep remotely on each requested node and verify each answer
      node_results = self.rpc.call_test_delay(self.op.on_nodes,
                                              self.op.duration)
      if not node_results:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, nres in node_results.items():
        nres.Raise()
        if not nres.data:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, nres.data))
6835 d61df03e Iustin Pop
6836 d61df03e Iustin Pop
6837 d1c2dd75 Iustin Pop
class IAllocator(object):
6838 d1c2dd75 Iustin Pop
  """IAllocator framework.
6839 d61df03e Iustin Pop

6840 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
6841 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
6842 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
6843 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
6844 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
6845 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
6846 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
6847 d1c2dd75 Iustin Pop
      easy usage
6848 d61df03e Iustin Pop

6849 d61df03e Iustin Pop
  """
6850 29859cb7 Iustin Pop
  _ALLO_KEYS = [
6851 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
6852 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
6853 d1c2dd75 Iustin Pop
    ]
6854 29859cb7 Iustin Pop
  _RELO_KEYS = [
6855 29859cb7 Iustin Pop
    "relocate_from",
6856 29859cb7 Iustin Pop
    ]
6857 d1c2dd75 Iustin Pop
6858 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
6859 72737a7f Iustin Pop
    self.lu = lu
6860 d1c2dd75 Iustin Pop
    # init buffer variables
6861 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
6862 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
6863 29859cb7 Iustin Pop
    self.mode = mode
6864 29859cb7 Iustin Pop
    self.name = name
6865 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
6866 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
6867 a0add446 Iustin Pop
    self.hypervisor = None
6868 29859cb7 Iustin Pop
    self.relocate_from = None
6869 27579978 Iustin Pop
    # computed fields
6870 27579978 Iustin Pop
    self.required_nodes = None
6871 d1c2dd75 Iustin Pop
    # init result fields
6872 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
6873 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6874 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
6875 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6876 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
6877 29859cb7 Iustin Pop
    else:
6878 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
6879 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
6880 d1c2dd75 Iustin Pop
    for key in kwargs:
6881 29859cb7 Iustin Pop
      if key not in keyset:
6882 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
6883 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
6884 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
6885 29859cb7 Iustin Pop
    for key in keyset:
6886 d1c2dd75 Iustin Pop
      if key not in kwargs:
6887 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
6888 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
6889 d1c2dd75 Iustin Pop
    self._BuildInputData()
6890 d1c2dd75 Iustin Pop
6891 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
6892 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
6893 d1c2dd75 Iustin Pop

6894 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
6895 d1c2dd75 Iustin Pop

6896 d1c2dd75 Iustin Pop
    """
6897 72737a7f Iustin Pop
    cfg = self.lu.cfg
6898 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
6899 d1c2dd75 Iustin Pop
    # cluster data
6900 d1c2dd75 Iustin Pop
    data = {
6901 77031881 Iustin Pop
      "version": constants.IALLOCATOR_VERSION,
6902 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
6903 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
6904 1325da74 Iustin Pop
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
6905 d1c2dd75 Iustin Pop
      # we don't have job IDs
6906 d61df03e Iustin Pop
      }
6907 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
6908 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
6909 6286519f Iustin Pop
6910 d1c2dd75 Iustin Pop
    # node data
6911 d1c2dd75 Iustin Pop
    node_results = {}
6912 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
6913 8cc7e742 Guido Trotter
6914 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6915 a0add446 Iustin Pop
      hypervisor_name = self.hypervisor
6916 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6917 a0add446 Iustin Pop
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
6918 8cc7e742 Guido Trotter
6919 72737a7f Iustin Pop
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
6920 a0add446 Iustin Pop
                                           hypervisor_name)
6921 18640d69 Guido Trotter
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
6922 18640d69 Guido Trotter
                       cluster_info.enabled_hypervisors)
6923 1325da74 Iustin Pop
    for nname, nresult in node_data.items():
6924 1325da74 Iustin Pop
      # first fill in static (config-based) values
6925 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
6926 d1c2dd75 Iustin Pop
      pnr = {
6927 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
6928 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
6929 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
6930 fc0fe88c Iustin Pop
        "offline": ninfo.offline,
6931 0b2454b9 Iustin Pop
        "drained": ninfo.drained,
6932 1325da74 Iustin Pop
        "master_candidate": ninfo.master_candidate,
6933 d1c2dd75 Iustin Pop
        }
6934 1325da74 Iustin Pop
6935 1325da74 Iustin Pop
      if not ninfo.offline:
6936 1325da74 Iustin Pop
        nresult.Raise()
6937 1325da74 Iustin Pop
        if not isinstance(nresult.data, dict):
6938 1325da74 Iustin Pop
          raise errors.OpExecError("Can't get data for node %s" % nname)
6939 1325da74 Iustin Pop
        remote_info = nresult.data
6940 1325da74 Iustin Pop
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
6941 1325da74 Iustin Pop
                     'vg_size', 'vg_free', 'cpu_total']:
6942 1325da74 Iustin Pop
          if attr not in remote_info:
6943 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' didn't return attribute"
6944 1325da74 Iustin Pop
                                     " '%s'" % (nname, attr))
6945 1325da74 Iustin Pop
          try:
6946 1325da74 Iustin Pop
            remote_info[attr] = int(remote_info[attr])
6947 1325da74 Iustin Pop
          except ValueError, err:
6948 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' returned invalid value"
6949 1325da74 Iustin Pop
                                     " for '%s': %s" % (nname, attr, err))
6950 1325da74 Iustin Pop
        # compute memory used by primary instances
6951 1325da74 Iustin Pop
        i_p_mem = i_p_up_mem = 0
6952 1325da74 Iustin Pop
        for iinfo, beinfo in i_list:
6953 1325da74 Iustin Pop
          if iinfo.primary_node == nname:
6954 1325da74 Iustin Pop
            i_p_mem += beinfo[constants.BE_MEMORY]
6955 1325da74 Iustin Pop
            if iinfo.name not in node_iinfo[nname].data:
6956 1325da74 Iustin Pop
              i_used_mem = 0
6957 1325da74 Iustin Pop
            else:
6958 1325da74 Iustin Pop
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
6959 1325da74 Iustin Pop
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
6960 1325da74 Iustin Pop
            remote_info['memory_free'] -= max(0, i_mem_diff)
6961 1325da74 Iustin Pop
6962 1325da74 Iustin Pop
            if iinfo.admin_up:
6963 1325da74 Iustin Pop
              i_p_up_mem += beinfo[constants.BE_MEMORY]
6964 1325da74 Iustin Pop
6965 1325da74 Iustin Pop
        # compute memory used by instances
6966 1325da74 Iustin Pop
        pnr_dyn = {
6967 1325da74 Iustin Pop
          "total_memory": remote_info['memory_total'],
6968 1325da74 Iustin Pop
          "reserved_memory": remote_info['memory_dom0'],
6969 1325da74 Iustin Pop
          "free_memory": remote_info['memory_free'],
6970 1325da74 Iustin Pop
          "total_disk": remote_info['vg_size'],
6971 1325da74 Iustin Pop
          "free_disk": remote_info['vg_free'],
6972 1325da74 Iustin Pop
          "total_cpus": remote_info['cpu_total'],
6973 1325da74 Iustin Pop
          "i_pri_memory": i_p_mem,
6974 1325da74 Iustin Pop
          "i_pri_up_memory": i_p_up_mem,
6975 1325da74 Iustin Pop
          }
6976 1325da74 Iustin Pop
        pnr.update(pnr_dyn)
6977 1325da74 Iustin Pop
6978 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
6979 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
6980 d1c2dd75 Iustin Pop
6981 d1c2dd75 Iustin Pop
    # instance data
6982 d1c2dd75 Iustin Pop
    instance_data = {}
6983 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
6984 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
6985 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
6986 d1c2dd75 Iustin Pop
      pir = {
6987 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
6988 1325da74 Iustin Pop
        "admin_up": iinfo.admin_up,
6989 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
6990 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
6991 d1c2dd75 Iustin Pop
        "os": iinfo.os,
6992 1325da74 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
6993 d1c2dd75 Iustin Pop
        "nics": nic_data,
6994 1325da74 Iustin Pop
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
6995 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
6996 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
6997 d1c2dd75 Iustin Pop
        }
6998 88ae4f85 Iustin Pop
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
6999 88ae4f85 Iustin Pop
                                                 pir["disks"])
7000 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
7001 d61df03e Iustin Pop
7002 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
7003 d61df03e Iustin Pop
7004 d1c2dd75 Iustin Pop
    self.in_data = data
7005 d61df03e Iustin Pop
7006 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
7007 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
7008 d61df03e Iustin Pop

7009 d1c2dd75 Iustin Pop
    This in combination with _AllocatorGetClusterData will create the
7010 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
7011 d61df03e Iustin Pop

7012 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
7013 d1c2dd75 Iustin Pop
    done.
7014 d61df03e Iustin Pop

7015 d1c2dd75 Iustin Pop
    """
7016 d1c2dd75 Iustin Pop
    data = self.in_data
7017 d1c2dd75 Iustin Pop
7018 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
7019 d1c2dd75 Iustin Pop
7020 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
7021 27579978 Iustin Pop
      self.required_nodes = 2
7022 27579978 Iustin Pop
    else:
7023 27579978 Iustin Pop
      self.required_nodes = 1
7024 d1c2dd75 Iustin Pop
    request = {
7025 d1c2dd75 Iustin Pop
      "type": "allocate",
7026 d1c2dd75 Iustin Pop
      "name": self.name,
7027 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
7028 d1c2dd75 Iustin Pop
      "tags": self.tags,
7029 d1c2dd75 Iustin Pop
      "os": self.os,
7030 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
7031 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
7032 d1c2dd75 Iustin Pop
      "disks": self.disks,
7033 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
7034 d1c2dd75 Iustin Pop
      "nics": self.nics,
7035 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
7036 d1c2dd75 Iustin Pop
      }
7037 d1c2dd75 Iustin Pop
    data["request"] = request
7038 298fe380 Iustin Pop
7039 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
7040 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
7041 298fe380 Iustin Pop

7042 d1c2dd75 Iustin Pop
    This in combination with _IAllocatorGetClusterData will create the
7043 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
7044 d61df03e Iustin Pop

7045 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
7046 d1c2dd75 Iustin Pop
    done.
7047 d61df03e Iustin Pop

7048 d1c2dd75 Iustin Pop
    """
7049 72737a7f Iustin Pop
    instance = self.lu.cfg.GetInstanceInfo(self.name)
7050 27579978 Iustin Pop
    if instance is None:
7051 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
7052 27579978 Iustin Pop
                                   " IAllocator" % self.name)
7053 27579978 Iustin Pop
7054 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
7055 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
7056 27579978 Iustin Pop
7057 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
7058 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
7059 2a139bb0 Iustin Pop
7060 27579978 Iustin Pop
    self.required_nodes = 1
7061 dafc7302 Guido Trotter
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
7062 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
7063 27579978 Iustin Pop
7064 d1c2dd75 Iustin Pop
    request = {
7065 2a139bb0 Iustin Pop
      "type": "relocate",
7066 d1c2dd75 Iustin Pop
      "name": self.name,
7067 27579978 Iustin Pop
      "disk_space_total": disk_space,
7068 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
7069 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
7070 d1c2dd75 Iustin Pop
      }
7071 27579978 Iustin Pop
    self.in_data["request"] = request
7072 d61df03e Iustin Pop
7073 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
7074 d1c2dd75 Iustin Pop
    """Build input data structures.
7075 d61df03e Iustin Pop

7076 d1c2dd75 Iustin Pop
    """
7077 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
7078 d61df03e Iustin Pop
7079 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
7080 d1c2dd75 Iustin Pop
      self._AddNewInstance()
7081 d1c2dd75 Iustin Pop
    else:
7082 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
7083 d61df03e Iustin Pop
7084 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
7085 d61df03e Iustin Pop
7086 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
7087 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
7088 298fe380 Iustin Pop

7089 d1c2dd75 Iustin Pop
    """
7090 72737a7f Iustin Pop
    if call_fn is None:
7091 72737a7f Iustin Pop
      call_fn = self.lu.rpc.call_iallocator_runner
7092 d1c2dd75 Iustin Pop
    data = self.in_text
7093 298fe380 Iustin Pop
7094 72737a7f Iustin Pop
    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
7095 781de953 Iustin Pop
    result.Raise()
7096 298fe380 Iustin Pop
7097 781de953 Iustin Pop
    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
7098 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
7099 8d528b7c Iustin Pop
7100 781de953 Iustin Pop
    rcode, stdout, stderr, fail = result.data
7101 8d528b7c Iustin Pop
7102 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
7103 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
7104 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
7105 38206f3c Iustin Pop
      raise errors.OpExecError("Instance allocator call failed: %s,"
7106 38206f3c Iustin Pop
                               " output: %s" % (fail, stdout+stderr))
7107 8d528b7c Iustin Pop
    self.out_text = stdout
7108 d1c2dd75 Iustin Pop
    if validate:
7109 d1c2dd75 Iustin Pop
      self._ValidateResult()
7110 298fe380 Iustin Pop
7111 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
7112 d1c2dd75 Iustin Pop
    """Process the allocator results.
7113 538475ca Iustin Pop

7114 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
7115 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
7116 538475ca Iustin Pop

7117 d1c2dd75 Iustin Pop
    """
7118 d1c2dd75 Iustin Pop
    try:
7119 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
7120 d1c2dd75 Iustin Pop
    except Exception, err:
7121 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
7122 d1c2dd75 Iustin Pop
7123 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
7124 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
7125 538475ca Iustin Pop
7126 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
7127 d1c2dd75 Iustin Pop
      if key not in rdict:
7128 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
7129 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
7130 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
7131 538475ca Iustin Pop
7132 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
7133 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
7134 d1c2dd75 Iustin Pop
                               " is not a list")
7135 d1c2dd75 Iustin Pop
    self.out_data = rdict
7136 538475ca Iustin Pop
7137 538475ca Iustin Pop
7138 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  # Required opcode parameters: "direction" (generate input vs. run the
  # allocator), "mode" (allocation vs. relocation test) and the instance
  # "name"; both are further validated in CheckPrereq below.
  _OP_REQP = ["direction", "mode", "name"]
7145 d61df03e Iustin Pop
7146 d61df03e Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the requested test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # an allocation test needs the full instance specification
      for required in ["name", "mem_size", "disks", "disk_template",
                       "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, required):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     required)
      existing = self.cfg.ExpandInstanceName(self.op.name)
      if existing is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   existing)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for nic in self.op.nics:
        # each NIC must be a dict carrying mac, ip and bridge
        nic_ok = (isinstance(nic, dict) and
                  "mac" in nic and
                  "ip" in nic and
                  "bridge" in nic)
        if not nic_ok:
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for disk in self.op.disks:
        # each disk must be a dict with an integer size and a valid mode
        disk_ok = (isinstance(disk, dict) and
                   "size" in disk and
                   isinstance(disk["size"], int) and
                   disk.get("mode") in ['r', 'w'])
        if not disk_ok:
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      # default to the cluster hypervisor when none was given
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      # a relocation test only needs the name of an existing instance
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      full_name = self.cfg.ExpandInstanceName(self.op.name)
      if full_name is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = full_name
      self.relocate_from = self.cfg.GetInstanceInfo(full_name).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      # actually running the allocator requires its name
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)
7202 d61df03e Iustin Pop
7203 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
    """Run the allocator test.

    Builds an IAllocator request from the opcode parameters (validated
    by CheckPrereq) and either returns the generated request text
    (direction 'in') or runs the named allocator and returns its raw
    output text (direction 'out').

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocation mode: pass the full instance specification
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      # relocation mode (the only other mode CheckPrereq accepts);
      # self.relocate_from was computed in CheckPrereq
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      # 'in' direction: only return the request that would be sent
      result = ial.in_text
    else:
      # 'out' direction: invoke the allocator; validate=False, so the
      # raw output text is returned without being parsed
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result