#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import time
import re
import platform
import logging
import copy

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo
    # support for dry-run
    self.dry_run_result = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer need to worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes for a phase, an empty list (and not None) should
    be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]
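
# Illustrative sketch, not part of the original module: a hypothetical
# concurrent LU that follows the rules documented in LogicalUnit above.  The
# class name and its single "instance_name" parameter are invented for
# illustration; NoHooksLU is defined just below.
#
#   class LUExampleCheckInstance(NoHooksLU):
#     _OP_REQP = ["instance_name"]
#     REQ_BGL = False
#
#     def ExpandNames(self):
#       # canonicalize self.op.instance_name and request its instance lock
#       self._ExpandAndLockInstance()
#       # node locks are computed later, once the instance lock is held
#       self.needed_locks[locking.LEVEL_NODE] = []
#       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
#
#     def DeclareLocks(self, level):
#       if level == locking.LEVEL_NODE:
#         self._LockInstancesNodes()
#
#     def CheckPrereq(self):
#       self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
#
#     def Exec(self, feedback_fn):
#       feedback_fn("%s runs on %s" %
#                   (self.instance.name, self.instance.primary_node))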


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @type selected: list
  @param selected: the list of output fields to check

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))
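
# Illustrative usage, not in the original file: a query-type LU would
# typically validate its requested output fields with something like
#
#   _CheckOutputFields(static=utils.FieldSet("name", "pinst_cnt"),
#                      dynamic=utils.FieldSet("dfree", "dtotal"),
#                      selected=self.op.output_fields)
#
# The field names above are placeholders; any member of selected that matches
# neither set causes errors.OpPrereqError to be raised.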


def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor: string
  @param hypervisor: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
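
# Illustrative sketch, not part of the original module: for a hypothetical
# single-NIC, single-disk instance the environment returned above would
# contain entries along these lines (all values invented):
#
#   {
#     "OP_TARGET": "inst1.example.com",
#     "INSTANCE_NAME": "inst1.example.com",
#     "INSTANCE_PRIMARY": "node1.example.com",
#     "INSTANCE_SECONDARIES": "node2.example.com",
#     "INSTANCE_OS_TYPE": "debootstrap",
#     "INSTANCE_STATUS": "up",
#     "INSTANCE_MEMORY": 512,
#     "INSTANCE_VCPUS": 1,
#     "INSTANCE_DISK_TEMPLATE": "drbd",
#     "INSTANCE_HYPERVISOR": "xen-pvm",
#     "INSTANCE_NIC_COUNT": 1,
#     "INSTANCE_NIC0_IP": "",
#     "INSTANCE_NIC0_MAC": "aa:00:00:11:22:33",
#     "INSTANCE_NIC0_MODE": "bridged",
#     "INSTANCE_NIC0_LINK": "xen-br0",
#     "INSTANCE_NIC0_BRIDGE": "xen-br0",
#     "INSTANCE_DISK_COUNT": 1,
#     "INSTANCE_DISK0_SIZE": 10240,
#     "INSTANCE_DISK0_MODE": "rw",
#     # plus one INSTANCE_BE_* entry per backend parameter and one
#     # INSTANCE_HV_* entry per hypervisor parameter
#   }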

def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
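
# Illustrative usage, not in the original file: an instance-level LU would
# normally call this helper from its BuildHooksEnv, optionally overriding
# selected keys (the override value below is invented):
#
#   env = _BuildInstanceHookEnvByObject(self, self.instance,
#                                       override={"INSTANCE_STATUS": "down"})
#   nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
#   return env, nl, nl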


def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _CheckNicsBridgesExist(lu, target_nics, target_node,
                               profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
                for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map, vg_name):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        the form of minor: (instance, must_exist), which correspond to
        instances and their running status
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G
    if vg_name is not None:
      vglist = node_result.get(constants.NV_VGLIST, None)
      if not vglist:
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                        (node,))
        bad = True
      else:
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
          bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    if vg_name is not None:
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
      if not isinstance(used_minors, (tuple, list)):
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
                    str(used_minors))
      else:
        for minor, (iname, must_exist) in drbd_map.items():
          if minor not in used_minors and must_exist:
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
                        " not active" % (minor, iname))
            bad = True
        for minor in used_minors:
          if minor not in drbd_map:
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
                        minor)
            bad = True

    return bad
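
  # Illustrative sketch, not part of the original module: the node_result
  # argument checked above is the per-node verification payload; for a healthy
  # node it would look roughly like this (keys are the constants.NV_* values
  # used above, sample values invented):
  #
  #   {
  #     'version': (constants.PROTOCOL_VERSION, constants.RELEASE_VERSION),
  #     constants.NV_VGLIST: {'<vg name>': <size>},
  #     constants.NV_FILELIST: {'<file name>': '<checksum>'},
  #     constants.NV_NODELIST: {},     # ssh failures, keyed by remote node
  #     constants.NV_NODENETTEST: {},  # tcp failures, keyed by remote node
  #     constants.NV_HYPERVISOR: {'<hv name>': None},
  #     constants.NV_DRBDLIST: [<minor>, ...],
  #   }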

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.admin_up:
      if ((node_current not in node_instance or
          instance not in node_instance[node_current]) and
          node_current not in n_offline):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as a secondary
      # has enough memory to host all instances it is supposed to take over,
      # should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
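
  # Illustrative worked example, not part of the original module, with
  # invented numbers: if a node reports mfree=1024 and its 'sinst-by-pnode'
  # map is {'node1': ['inst1', 'inst2']}, where both instances have
  # BE_AUTO_BALANCE set and BE_MEMORY=768, then needed_mem for a failure of
  # node1 is 768 + 768 = 1536 > 1024, so the check above flags the node as
  # not N+1 compliant.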
951 2b3b6ddd Guido Trotter
952 a8083063 Iustin Pop
  def CheckPrereq(self):
953 a8083063 Iustin Pop
    """Check prerequisites.
954 a8083063 Iustin Pop

955 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
956 e54c4c5e Guido Trotter
    all its members are valid.
957 a8083063 Iustin Pop

958 a8083063 Iustin Pop
    """
959 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
960 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
961 e54c4c5e Guido Trotter
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
962 a8083063 Iustin Pop
963 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
964 d8fff41c Guido Trotter
    """Build hooks env.
965 d8fff41c Guido Trotter

966 d8fff41c Guido Trotter
    Cluster-Verify hooks just run in the post phase and their failure causes
967 d8fff41c Guido Trotter
    the output to be logged in the verify output and the verification to fail.
968 d8fff41c Guido Trotter

969 d8fff41c Guido Trotter
    """
970 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
971 35e994e9 Iustin Pop
    env = {
972 35e994e9 Iustin Pop
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
973 35e994e9 Iustin Pop
      }
974 35e994e9 Iustin Pop
    for node in self.cfg.GetAllNodesInfo().values():
975 35e994e9 Iustin Pop
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
976 35e994e9 Iustin Pop
977 d8fff41c Guido Trotter
    return env, [], all_nodes
978 d8fff41c Guido Trotter
979 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
980 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
981 a8083063 Iustin Pop

982 a8083063 Iustin Pop
    """
983 a8083063 Iustin Pop
    bad = False
984 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
985 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
986 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
987 a8083063 Iustin Pop
988 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
989 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
990 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
991 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
992 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
993 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
994 6d2e83d5 Iustin Pop
                        for iname in instancelist)
995 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
996 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
997 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
998 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
999 a8083063 Iustin Pop
    node_volume = {}
1000 a8083063 Iustin Pop
    node_instance = {}
1001 9c9c7d30 Guido Trotter
    node_info = {}
1002 26b6af5e Guido Trotter
    instance_cfg = {}
1003 a8083063 Iustin Pop
1004 a8083063 Iustin Pop
    # FIXME: verify OS list
1005 a8083063 Iustin Pop
    # do local checksums
1006 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
1007 112f18a5 Iustin Pop
1008 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
1009 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
1010 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
1011 112f18a5 Iustin Pop
    file_names.extend(master_files)
1012 112f18a5 Iustin Pop
1013 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
1014 a8083063 Iustin Pop
1015 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1016 a8083063 Iustin Pop
    node_verify_param = {
1017 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
1018 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
1019 82e37788 Iustin Pop
                              if not node.offline],
1020 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1021 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1022 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1023 82e37788 Iustin Pop
                                 if not node.offline],
1024 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1025 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1026 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1027 a8083063 Iustin Pop
      }
1028 cc9e1230 Guido Trotter
    if vg_name is not None:
1029 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1030 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1031 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
1032 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1033 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1034 a8083063 Iustin Pop
1035 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1036 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1037 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1038 6d2e83d5 Iustin Pop
1039 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1040 112f18a5 Iustin Pop
      node = node_i.name
1041 25361b9a Iustin Pop
1042 0a66c968 Iustin Pop
      if node_i.offline:
1043 0a66c968 Iustin Pop
        feedback_fn("* Skipping offline node %s" % (node,))
1044 0a66c968 Iustin Pop
        n_offline.append(node)
1045 0a66c968 Iustin Pop
        continue
1046 0a66c968 Iustin Pop
1047 112f18a5 Iustin Pop
      if node == master_node:
1048 25361b9a Iustin Pop
        ntype = "master"
1049 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1050 25361b9a Iustin Pop
        ntype = "master candidate"
1051 22f0f71d Iustin Pop
      elif node_i.drained:
1052 22f0f71d Iustin Pop
        ntype = "drained"
1053 22f0f71d Iustin Pop
        n_drained.append(node)
1054 112f18a5 Iustin Pop
      else:
1055 25361b9a Iustin Pop
        ntype = "regular"
1056 112f18a5 Iustin Pop
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1057 25361b9a Iustin Pop
1058 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1059 6f68a739 Iustin Pop
      if msg:
1060 6f68a739 Iustin Pop
        feedback_fn("  - ERROR: while contacting node %s: %s" % (node, msg))
1061 25361b9a Iustin Pop
        bad = True
1062 25361b9a Iustin Pop
        continue
1063 25361b9a Iustin Pop
1064 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1065 6d2e83d5 Iustin Pop
      node_drbd = {}
1066 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1067 c614e5fb Iustin Pop
        if instance not in instanceinfo:
1068 c614e5fb Iustin Pop
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
1069 c614e5fb Iustin Pop
                      instance)
1070 c614e5fb Iustin Pop
          # ghost instance should not be running, but otherwise we
1071 c614e5fb Iustin Pop
          # don't give double warnings (both ghost instance and
1072 c614e5fb Iustin Pop
          # unallocated minor in use)
1073 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1074 c614e5fb Iustin Pop
        else:
1075 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1076 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
1077 112f18a5 Iustin Pop
      result = self._VerifyNode(node_i, file_names, local_checksums,
1078 6d2e83d5 Iustin Pop
                                nresult, feedback_fn, master_files,
1079 cc9e1230 Guido Trotter
                                node_drbd, vg_name)
1080 a8083063 Iustin Pop
      bad = bad or result
1081 a8083063 Iustin Pop
1082 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1083 cc9e1230 Guido Trotter
      if vg_name is None:
1084 cc9e1230 Guido Trotter
        node_volume[node] = {}
1085 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1086 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
1087 26f15862 Iustin Pop
                    (node, utils.SafeEncode(lvdata)))
1088 b63ed789 Iustin Pop
        bad = True
1089 b63ed789 Iustin Pop
        node_volume[node] = {}
1090 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1091 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1092 a8083063 Iustin Pop
        bad = True
1093 a8083063 Iustin Pop
        continue
1094 b63ed789 Iustin Pop
      else:
1095 25361b9a Iustin Pop
        node_volume[node] = lvdata
1096 a8083063 Iustin Pop
1097 a8083063 Iustin Pop
      # node_instance
1098 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1099 25361b9a Iustin Pop
      if not isinstance(idata, list):
1100 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1101 25361b9a Iustin Pop
                    (node,))
1102 a8083063 Iustin Pop
        bad = True
1103 a8083063 Iustin Pop
        continue
1104 a8083063 Iustin Pop
1105 25361b9a Iustin Pop
      node_instance[node] = idata
1106 a8083063 Iustin Pop
1107 9c9c7d30 Guido Trotter
      # node_info
1108 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1109 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
1110 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1111 9c9c7d30 Guido Trotter
        bad = True
1112 9c9c7d30 Guido Trotter
        continue
1113 9c9c7d30 Guido Trotter
1114 9c9c7d30 Guido Trotter
      try:
1115 9c9c7d30 Guido Trotter
        node_info[node] = {
1116 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1117 93e4c50b Guido Trotter
          "pinst": [],
1118 93e4c50b Guido Trotter
          "sinst": [],
1119 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1120 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1121 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1122 36e7da50 Guido Trotter
          # current node as secondary. This is handy to calculate N+1 memory
1123 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1124 36e7da50 Guido Trotter
          # secondary.
1125 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1126 9c9c7d30 Guido Trotter
        }
1127 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1128 cc9e1230 Guido Trotter
        if vg_name is not None:
1129 9a198532 Iustin Pop
          if (constants.NV_VGLIST not in nresult or
1130 9a198532 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST]):
1131 9a198532 Iustin Pop
            feedback_fn("  - ERROR: node %s didn't return data for the"
1132 9a198532 Iustin Pop
                        " volume group '%s' - it is either missing or broken" %
1133 9a198532 Iustin Pop
                        (node, vg_name))
1134 9a198532 Iustin Pop
            bad = True
1135 9a198532 Iustin Pop
            continue
1136 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1137 9a198532 Iustin Pop
      except (ValueError, KeyError):
1138 9a198532 Iustin Pop
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
1139 9a198532 Iustin Pop
                    " from node %s" % (node,))
1140 9c9c7d30 Guido Trotter
        bad = True
1141 9c9c7d30 Guido Trotter
        continue
1142 9c9c7d30 Guido Trotter
1143 a8083063 Iustin Pop
    node_vol_should = {}
1144 a8083063 Iustin Pop
1145 a8083063 Iustin Pop
    for instance in instancelist:
1146 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1147 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1148 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1149 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1150 c5705f58 Guido Trotter
      bad = bad or result
1151 832261fd Iustin Pop
      inst_nodes_offline = []
1152 a8083063 Iustin Pop
1153 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1154 a8083063 Iustin Pop
1155 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1156 26b6af5e Guido Trotter
1157 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1158 93e4c50b Guido Trotter
      if pnode in node_info:
1159 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1160 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1161 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1162 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1163 93e4c50b Guido Trotter
        bad = True
1164 93e4c50b Guido Trotter
1165 832261fd Iustin Pop
      if pnode in n_offline:
1166 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1167 832261fd Iustin Pop
1168 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1169 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1170 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1171 93e4c50b Guido Trotter
      # supported either.
1172 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1173 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1174 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1175 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1176 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1177 93e4c50b Guido Trotter
                    % instance)
1178 93e4c50b Guido Trotter
1179 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1180 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1181 3924700f Iustin Pop
1182 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1183 93e4c50b Guido Trotter
        if snode in node_info:
1184 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1185 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1186 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1187 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1188 0a66c968 Iustin Pop
        elif snode not in n_offline:
1189 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1190 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1191 832261fd Iustin Pop
          bad = True
1192 832261fd Iustin Pop
        if snode in n_offline:
1193 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1194 832261fd Iustin Pop
1195 832261fd Iustin Pop
      if inst_nodes_offline:
1196 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1197 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1198 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1199 832261fd Iustin Pop
        bad = True
1200 93e4c50b Guido Trotter
1201 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1202 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1203 a8083063 Iustin Pop
                                       feedback_fn)
1204 a8083063 Iustin Pop
    bad = bad or result
1205 a8083063 Iustin Pop
1206 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1207 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1208 a8083063 Iustin Pop
                                         feedback_fn)
1209 a8083063 Iustin Pop
    bad = bad or result
1210 a8083063 Iustin Pop
1211 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1212 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1213 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1214 e54c4c5e Guido Trotter
      bad = bad or result
1215 2b3b6ddd Guido Trotter
1216 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1217 2b3b6ddd Guido Trotter
    if i_non_redundant:
1218 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1219 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1220 2b3b6ddd Guido Trotter
1221 3924700f Iustin Pop
    if i_non_a_balanced:
1222 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1223 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1224 3924700f Iustin Pop
1225 0a66c968 Iustin Pop
    if n_offline:
1226 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1227 0a66c968 Iustin Pop
1228 22f0f71d Iustin Pop
    if n_drained:
1229 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1230 22f0f71d Iustin Pop
1231 34290825 Michael Hanselmann
    return not bad
1232 a8083063 Iustin Pop
1233 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1234 e4376078 Iustin Pop
    """Analize the post-hooks' result
1235 e4376078 Iustin Pop

1236 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
1237 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
1238 d8fff41c Guido Trotter

1239 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1240 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1241 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
1242 e4376078 Iustin Pop
    @param feedback_fn: function used to send feedback back to the caller
1243 e4376078 Iustin Pop
    @param lu_result: previous Exec result
1244 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
1245 e4376078 Iustin Pop
        and hook results
1246 d8fff41c Guido Trotter

1247 d8fff41c Guido Trotter
    """
1248 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
1249 38206f3c Iustin Pop
    # their results
1250 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
1251 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
1252 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
1253 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
1254 d8fff41c Guido Trotter
      if not hooks_results:
1255 d8fff41c Guido Trotter
        feedback_fn("  - ERROR: general communication failure")
1256 d8fff41c Guido Trotter
        lu_result = 1
1257 d8fff41c Guido Trotter
      else:
1258 d8fff41c Guido Trotter
        for node_name in hooks_results:
1259 d8fff41c Guido Trotter
          show_node_header = True
1260 d8fff41c Guido Trotter
          res = hooks_results[node_name]
1261 4c4e4e1e Iustin Pop
          msg = res.fail_msg
1262 3fb4f740 Iustin Pop
          if msg:
1263 0a66c968 Iustin Pop
            if res.offline:
1264 0a66c968 Iustin Pop
              # no need to warn or set fail return value
1265 0a66c968 Iustin Pop
              continue
1266 3fb4f740 Iustin Pop
            feedback_fn("    Communication failure in hooks execution: %s" %
1267 3fb4f740 Iustin Pop
                        msg)
1268 d8fff41c Guido Trotter
            lu_result = 1
1269 d8fff41c Guido Trotter
            continue
1270 3fb4f740 Iustin Pop
          for script, hkr, output in res.payload:
1271 d8fff41c Guido Trotter
            if hkr == constants.HKR_FAIL:
1272 d8fff41c Guido Trotter
              # The node header is only shown once, if there are
1273 d8fff41c Guido Trotter
              # failing hooks on that node
1274 d8fff41c Guido Trotter
              if show_node_header:
1275 d8fff41c Guido Trotter
                feedback_fn("  Node %s:" % node_name)
1276 d8fff41c Guido Trotter
                show_node_header = False
1277 d8fff41c Guido Trotter
              feedback_fn("    ERROR: Script %s failed, output:" % script)
1278 d8fff41c Guido Trotter
              output = indent_re.sub('      ', output)
1279 d8fff41c Guido Trotter
              feedback_fn("%s" % output)
1280 d8fff41c Guido Trotter
              lu_result = 1
1281 d8fff41c Guido Trotter
1282 d8fff41c Guido Trotter
      return lu_result
1283 d8fff41c Guido Trotter
1284 a8083063 Iustin Pop
1285 2c95a8d4 Iustin Pop
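# Editorial note (illustrative, shape inferred from HooksCallBack above):
# each per-node hooks result payload is iterated as (script, hkr, output)
# tuples, e.g. a hypothetical failing entry
#
#   ("99-check-storage", constants.HKR_FAIL, "missing device /dev/sdb\n")
#
# makes LUVerifyCluster.HooksCallBack print the indented script output and
# downgrade the final verify result (lu_result) to 1.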
class LUVerifyDisks(NoHooksLU):
1286 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1287 2c95a8d4 Iustin Pop

1288 2c95a8d4 Iustin Pop
  """
1289 2c95a8d4 Iustin Pop
  _OP_REQP = []
1290 d4b9d97f Guido Trotter
  REQ_BGL = False
1291 d4b9d97f Guido Trotter
1292 d4b9d97f Guido Trotter
  def ExpandNames(self):
1293 d4b9d97f Guido Trotter
    self.needed_locks = {
1294 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1295 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1296 d4b9d97f Guido Trotter
    }
1297 d4b9d97f Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1298 2c95a8d4 Iustin Pop
1299 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1300 2c95a8d4 Iustin Pop
    """Check prerequisites.
1301 2c95a8d4 Iustin Pop

1302 2c95a8d4 Iustin Pop
    This has no prerequisites.
1303 2c95a8d4 Iustin Pop

1304 2c95a8d4 Iustin Pop
    """
1305 2c95a8d4 Iustin Pop
    pass
1306 2c95a8d4 Iustin Pop
1307 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1308 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1309 2c95a8d4 Iustin Pop

1310 29d376ec Iustin Pop
    @rtype: tuple of three items
1311 29d376ec Iustin Pop
    @return: a tuple of (dict of node-to-node_error, list of instances
1312 29d376ec Iustin Pop
        which need activate-disks, dict of instance: (node, volume) for
1313 29d376ec Iustin Pop
        missing volumes)
1314 29d376ec Iustin Pop

1315 2c95a8d4 Iustin Pop
    """
1316 29d376ec Iustin Pop
    result = res_nodes, res_instances, res_missing = {}, [], {}
1317 2c95a8d4 Iustin Pop
1318 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1319 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1320 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1321 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1322 2c95a8d4 Iustin Pop
1323 2c95a8d4 Iustin Pop
    nv_dict = {}
1324 2c95a8d4 Iustin Pop
    for inst in instances:
1325 2c95a8d4 Iustin Pop
      inst_lvs = {}
1326 0d68c45d Iustin Pop
      if (not inst.admin_up or
1327 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1328 2c95a8d4 Iustin Pop
        continue
1329 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1330 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1331 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1332 2c95a8d4 Iustin Pop
        for vol in vol_list:
1333 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1334 2c95a8d4 Iustin Pop
1335 2c95a8d4 Iustin Pop
    if not nv_dict:
1336 2c95a8d4 Iustin Pop
      return result
1337 2c95a8d4 Iustin Pop
1338 72737a7f Iustin Pop
    node_lvs = self.rpc.call_volume_list(nodes, vg_name)
1339 2c95a8d4 Iustin Pop
1340 2c95a8d4 Iustin Pop
    to_act = set()
1341 2c95a8d4 Iustin Pop
    for node in nodes:
1342 2c95a8d4 Iustin Pop
      # node_volume
1343 29d376ec Iustin Pop
      node_res = node_lvs[node]
1344 29d376ec Iustin Pop
      if node_res.offline:
1345 ea9ddc07 Iustin Pop
        continue
1346 4c4e4e1e Iustin Pop
      msg = node_res.fail_msg
1347 29d376ec Iustin Pop
      if msg:
1348 29d376ec Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
1349 29d376ec Iustin Pop
        res_nodes[node] = msg
1350 2c95a8d4 Iustin Pop
        continue
1351 2c95a8d4 Iustin Pop
1352 29d376ec Iustin Pop
      lvs = node_res.payload
1353 29d376ec Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.items():
1354 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1355 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1356 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1357 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1358 2c95a8d4 Iustin Pop
1359 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1360 b63ed789 Iustin Pop
    # data better
1361 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1362 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1363 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1364 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1365 b63ed789 Iustin Pop
1366 2c95a8d4 Iustin Pop
    return result
1367 2c95a8d4 Iustin Pop
1368 2c95a8d4 Iustin Pop
1369 07bd8a51 Iustin Pop
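# Editorial note (illustrative, hypothetical values): the tuple returned by
# LUVerifyDisks.Exec above could look like
#
#   ({"node3": "Error while enumerating LVs: ..."},   # res_nodes
#    ["instance1"],                                   # needs activate-disks
#    {"instance2": [("node2", "xenvg/disk0")]})       # missing (node, volume)
#
# so that a caller can re-activate disks where needed and report the
# per-node errors and missing volumes.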
class LURenameCluster(LogicalUnit):
1370 07bd8a51 Iustin Pop
  """Rename the cluster.
1371 07bd8a51 Iustin Pop

1372 07bd8a51 Iustin Pop
  """
1373 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1374 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1375 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1376 07bd8a51 Iustin Pop
1377 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1378 07bd8a51 Iustin Pop
    """Build hooks env.
1379 07bd8a51 Iustin Pop

1380 07bd8a51 Iustin Pop
    """
1381 07bd8a51 Iustin Pop
    env = {
1382 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1383 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1384 07bd8a51 Iustin Pop
      }
1385 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1386 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1387 07bd8a51 Iustin Pop
1388 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1389 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1390 07bd8a51 Iustin Pop

1391 07bd8a51 Iustin Pop
    """
1392 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1393 07bd8a51 Iustin Pop
1394 bcf043c9 Iustin Pop
    new_name = hostname.name
1395 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1396 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
1397 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
1398 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1399 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1400 07bd8a51 Iustin Pop
                                 " cluster has changed")
1401 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1402 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1403 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1404 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1405 07bd8a51 Iustin Pop
                                   new_ip)
1406 07bd8a51 Iustin Pop
1407 07bd8a51 Iustin Pop
    self.op.name = new_name
1408 07bd8a51 Iustin Pop
1409 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1410 07bd8a51 Iustin Pop
    """Rename the cluster.
1411 07bd8a51 Iustin Pop

1412 07bd8a51 Iustin Pop
    """
1413 07bd8a51 Iustin Pop
    clustername = self.op.name
1414 07bd8a51 Iustin Pop
    ip = self.ip
1415 07bd8a51 Iustin Pop
1416 07bd8a51 Iustin Pop
    # shutdown the master IP
1417 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
1418 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
1419 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
1420 07bd8a51 Iustin Pop
1421 07bd8a51 Iustin Pop
    try:
1422 55cf7d83 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
1423 55cf7d83 Iustin Pop
      cluster.cluster_name = clustername
1424 55cf7d83 Iustin Pop
      cluster.master_ip = ip
1425 55cf7d83 Iustin Pop
      self.cfg.Update(cluster)
1426 ec85e3d5 Iustin Pop
1427 ec85e3d5 Iustin Pop
      # update the known hosts file
1428 ec85e3d5 Iustin Pop
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
1429 ec85e3d5 Iustin Pop
      node_list = self.cfg.GetNodeList()
1430 ec85e3d5 Iustin Pop
      try:
1431 ec85e3d5 Iustin Pop
        node_list.remove(master)
1432 ec85e3d5 Iustin Pop
      except ValueError:
1433 ec85e3d5 Iustin Pop
        pass
1434 ec85e3d5 Iustin Pop
      result = self.rpc.call_upload_file(node_list,
1435 ec85e3d5 Iustin Pop
                                         constants.SSH_KNOWN_HOSTS_FILE)
1436 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
1437 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
1438 6f7d4e75 Iustin Pop
        if msg:
1439 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
1440 6f7d4e75 Iustin Pop
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
1441 6f7d4e75 Iustin Pop
          self.proc.LogWarning(msg)
1442 ec85e3d5 Iustin Pop
1443 07bd8a51 Iustin Pop
    finally:
1444 781de953 Iustin Pop
      result = self.rpc.call_node_start_master(master, False)
1445 4c4e4e1e Iustin Pop
      msg = result.fail_msg
1446 b726aff0 Iustin Pop
      if msg:
1447 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
1448 b726aff0 Iustin Pop
                        " the master, please restart manually: %s", msg)
1449 07bd8a51 Iustin Pop
1450 07bd8a51 Iustin Pop
1451 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1452 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1453 8084f9f6 Manuel Franceschini

1454 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
1455 e4376078 Iustin Pop
  @param disk: the disk to check
1456 e4376078 Iustin Pop
  @rtype: boolean
1457 e4376078 Iustin Pop
  @return: boolean indicating whether a LD_LV dev_type was found or not
1458 8084f9f6 Manuel Franceschini

1459 8084f9f6 Manuel Franceschini
  """
1460 8084f9f6 Manuel Franceschini
  if disk.children:
1461 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1462 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1463 8084f9f6 Manuel Franceschini
        return True
1464 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
1465 8084f9f6 Manuel Franceschini
1466 8084f9f6 Manuel Franceschini
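# Editorial sketch (not part of the original file): for a mirrored disk whose
# children are logical volumes the helper above answers True, e.g. with
# hypothetical objects.Disk instances:
#
#   lv_a = objects.Disk(dev_type=constants.LD_LV, size=1024, ...)
#   lv_b = objects.Disk(dev_type=constants.LD_LV, size=1024, ...)
#   drbd = objects.Disk(dev_type=constants.LD_DRBD8, children=[lv_a, lv_b])
#   _RecursiveCheckIfLVMBased(drbd)     # -> True (recursion hits an LD_LV)
#
# which is what lets LUSetClusterParams below refuse to disable LVM storage
# while such instances exist.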
1467 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1468 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1469 8084f9f6 Manuel Franceschini

1470 8084f9f6 Manuel Franceschini
  """
1471 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1472 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1473 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1474 c53279cf Guido Trotter
  REQ_BGL = False
1475 c53279cf Guido Trotter
1476 3994f455 Iustin Pop
  def CheckArguments(self):
1477 4b7735f9 Iustin Pop
    """Check parameters
1478 4b7735f9 Iustin Pop

1479 4b7735f9 Iustin Pop
    """
1480 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
1481 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
1482 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1483 4b7735f9 Iustin Pop
      try:
1484 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1485 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
1486 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1487 4b7735f9 Iustin Pop
                                   str(err))
1488 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
1489 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed")
1490 4b7735f9 Iustin Pop
1491 c53279cf Guido Trotter
  def ExpandNames(self):
1492 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
1493 c53279cf Guido Trotter
    # all nodes to be modified.
1494 c53279cf Guido Trotter
    self.needed_locks = {
1495 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1496 c53279cf Guido Trotter
    }
1497 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1498 8084f9f6 Manuel Franceschini
1499 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1500 8084f9f6 Manuel Franceschini
    """Build hooks env.
1501 8084f9f6 Manuel Franceschini

1502 8084f9f6 Manuel Franceschini
    """
1503 8084f9f6 Manuel Franceschini
    env = {
1504 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1505 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1506 8084f9f6 Manuel Franceschini
      }
1507 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1508 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1509 8084f9f6 Manuel Franceschini
1510 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1511 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1512 8084f9f6 Manuel Franceschini

1513 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1514 5f83e263 Iustin Pop
    if the given volume group is valid.
1515 8084f9f6 Manuel Franceschini

1516 8084f9f6 Manuel Franceschini
    """
1517 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
1518 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1519 8084f9f6 Manuel Franceschini
      for inst in instances:
1520 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1521 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1522 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1523 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1524 8084f9f6 Manuel Franceschini
1525 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1526 779c15bb Iustin Pop
1527 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1528 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1529 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
1530 8084f9f6 Manuel Franceschini
      for node in node_list:
1531 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
1532 e480923b Iustin Pop
        if msg:
1533 781de953 Iustin Pop
          # ignoring down node
1534 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
1535 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
1536 781de953 Iustin Pop
          continue
1537 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
1538 781de953 Iustin Pop
                                              self.op.vg_name,
1539 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1540 8084f9f6 Manuel Franceschini
        if vgstatus:
1541 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1542 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1543 8084f9f6 Manuel Franceschini
1544 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
1545 5af3da74 Guido Trotter
    # validate params changes
1546 779c15bb Iustin Pop
    if self.op.beparams:
1547 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
1548 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
1549 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
1550 779c15bb Iustin Pop
1551 5af3da74 Guido Trotter
    if self.op.nicparams:
1552 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
1553 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
1554 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
1555 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
1556 5af3da74 Guido Trotter
1557 779c15bb Iustin Pop
    # hypervisor list/parameters
1558 abe609b2 Guido Trotter
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
1559 779c15bb Iustin Pop
    if self.op.hvparams:
1560 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
1561 779c15bb Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1562 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
1563 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
1564 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
1565 779c15bb Iustin Pop
        else:
1566 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
1567 779c15bb Iustin Pop
1568 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1569 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
1570 779c15bb Iustin Pop
    else:
1571 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
1572 779c15bb Iustin Pop
1573 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1574 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
1575 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
1576 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1577 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
1578 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
1579 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
1580 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
1581 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1582 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
1583 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
1584 779c15bb Iustin Pop
1585 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1586 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1587 8084f9f6 Manuel Franceschini

1588 8084f9f6 Manuel Franceschini
    """
1589 779c15bb Iustin Pop
    if self.op.vg_name is not None:
1590 b2482333 Guido Trotter
      new_volume = self.op.vg_name
1591 b2482333 Guido Trotter
      if not new_volume:
1592 b2482333 Guido Trotter
        new_volume = None
1593 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
1594 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
1595 779c15bb Iustin Pop
      else:
1596 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
1597 779c15bb Iustin Pop
                    " state, not changing")
1598 779c15bb Iustin Pop
    if self.op.hvparams:
1599 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
1600 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1601 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1602 779c15bb Iustin Pop
    if self.op.beparams:
1603 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
1604 5af3da74 Guido Trotter
    if self.op.nicparams:
1605 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
1606 5af3da74 Guido Trotter
1607 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1608 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1609 4b7735f9 Iustin Pop
1610 779c15bb Iustin Pop
    self.cfg.Update(self.cluster)
1611 8084f9f6 Manuel Franceschini
1612 4b7735f9 Iustin Pop
    # we want to update nodes after the cluster so that if any errors
1613 4b7735f9 Iustin Pop
    # happen, we have recorded and saved the cluster info
1614 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1615 ec0292f1 Iustin Pop
      _AdjustCandidatePool(self)
1616 4b7735f9 Iustin Pop
1617 8084f9f6 Manuel Franceschini
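# Editorial note (illustrative, hypothetical values): the objects.FillDict
# calls in LUSetClusterParams.CheckPrereq above merge the submitted
# parameters over the current cluster defaults, e.g.
#
#   objects.FillDict({"memory": 128, "auto_balance": True}, {"memory": 512})
#   # -> {"memory": 512, "auto_balance": True}
#
# and the merged dictionary then replaces
# cluster.beparams[constants.PP_DEFAULT] in Exec.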
1618 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
1619 28eddce5 Guido Trotter
  """Distribute additional files which are part of the cluster configuration.
1620 28eddce5 Guido Trotter

1621 28eddce5 Guido Trotter
  ConfigWriter takes care of distributing the config and ssconf files, but
1622 28eddce5 Guido Trotter
  there are more files which should be distributed to all nodes. This function
1623 28eddce5 Guido Trotter
  makes sure those are copied.
1624 28eddce5 Guido Trotter

1625 28eddce5 Guido Trotter
  @param lu: calling logical unit
1626 28eddce5 Guido Trotter
  @param additional_nodes: list of nodes not in the config to distribute to
1627 28eddce5 Guido Trotter

1628 28eddce5 Guido Trotter
  """
1629 28eddce5 Guido Trotter
  # 1. Gather target nodes
1630 28eddce5 Guido Trotter
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
1631 28eddce5 Guido Trotter
  dist_nodes = lu.cfg.GetNodeList()
1632 28eddce5 Guido Trotter
  if additional_nodes is not None:
1633 28eddce5 Guido Trotter
    dist_nodes.extend(additional_nodes)
1634 28eddce5 Guido Trotter
  if myself.name in dist_nodes:
1635 28eddce5 Guido Trotter
    dist_nodes.remove(myself.name)
1636 28eddce5 Guido Trotter
  # 2. Gather files to distribute
1637 28eddce5 Guido Trotter
  dist_files = set([constants.ETC_HOSTS,
1638 28eddce5 Guido Trotter
                    constants.SSH_KNOWN_HOSTS_FILE,
1639 28eddce5 Guido Trotter
                    constants.RAPI_CERT_FILE,
1640 28eddce5 Guido Trotter
                    constants.RAPI_USERS_FILE,
1641 28eddce5 Guido Trotter
                   ])
1642 e1b8653f Guido Trotter
1643 e1b8653f Guido Trotter
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
1644 e1b8653f Guido Trotter
  for hv_name in enabled_hypervisors:
1645 e1b8653f Guido Trotter
    hv_class = hypervisor.GetHypervisor(hv_name)
1646 e1b8653f Guido Trotter
    dist_files.update(hv_class.GetAncillaryFiles())
1647 e1b8653f Guido Trotter
1648 28eddce5 Guido Trotter
  # 3. Perform the files upload
1649 28eddce5 Guido Trotter
  for fname in dist_files:
1650 28eddce5 Guido Trotter
    if os.path.exists(fname):
1651 28eddce5 Guido Trotter
      result = lu.rpc.call_upload_file(dist_nodes, fname)
1652 28eddce5 Guido Trotter
      for to_node, to_result in result.items():
1653 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
1654 6f7d4e75 Iustin Pop
        if msg:
1655 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
1656 6f7d4e75 Iustin Pop
                 (fname, to_node, msg))
1657 6f7d4e75 Iustin Pop
          lu.proc.LogWarning(msg)
1658 28eddce5 Guido Trotter
1659 28eddce5 Guido Trotter
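# Usage sketch (editorial, hypothetical caller): an LU that has just added a
# node and wants it to receive /etc/hosts, the known_hosts file, the RAPI
# certificate and the hypervisor ancillary files before the node appears in
# the configuration could call
#
#   _RedistributeAncillaryFiles(self, additional_nodes=[new_node.name])
#
# while LURedistributeConfig below simply calls it with no additional nodes.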
1660 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
1661 afee0879 Iustin Pop
  """Force the redistribution of cluster configuration.
1662 afee0879 Iustin Pop

1663 afee0879 Iustin Pop
  This is a very simple LU.
1664 afee0879 Iustin Pop

1665 afee0879 Iustin Pop
  """
1666 afee0879 Iustin Pop
  _OP_REQP = []
1667 afee0879 Iustin Pop
  REQ_BGL = False
1668 afee0879 Iustin Pop
1669 afee0879 Iustin Pop
  def ExpandNames(self):
1670 afee0879 Iustin Pop
    self.needed_locks = {
1671 afee0879 Iustin Pop
      locking.LEVEL_NODE: locking.ALL_SET,
1672 afee0879 Iustin Pop
    }
1673 afee0879 Iustin Pop
    self.share_locks[locking.LEVEL_NODE] = 1
1674 afee0879 Iustin Pop
1675 afee0879 Iustin Pop
  def CheckPrereq(self):
1676 afee0879 Iustin Pop
    """Check prerequisites.
1677 afee0879 Iustin Pop

1678 afee0879 Iustin Pop
    """
1679 afee0879 Iustin Pop
1680 afee0879 Iustin Pop
  def Exec(self, feedback_fn):
1681 afee0879 Iustin Pop
    """Redistribute the configuration.
1682 afee0879 Iustin Pop

1683 afee0879 Iustin Pop
    """
1684 afee0879 Iustin Pop
    self.cfg.Update(self.cfg.GetClusterInfo())
1685 28eddce5 Guido Trotter
    _RedistributeAncillaryFiles(self)
1686 afee0879 Iustin Pop
1687 afee0879 Iustin Pop
1688 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1689 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1690 a8083063 Iustin Pop

1691 a8083063 Iustin Pop
  """
1692 a8083063 Iustin Pop
  if not instance.disks:
1693 a8083063 Iustin Pop
    return True
1694 a8083063 Iustin Pop
1695 a8083063 Iustin Pop
  if not oneshot:
1696 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1697 a8083063 Iustin Pop
1698 a8083063 Iustin Pop
  node = instance.primary_node
1699 a8083063 Iustin Pop
1700 a8083063 Iustin Pop
  for dev in instance.disks:
1701 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
1702 a8083063 Iustin Pop
1703 a8083063 Iustin Pop
  retries = 0
1704 fbafd7a8 Iustin Pop
  degr_retries = 10 # in seconds, as we sleep 1 second each time
1705 a8083063 Iustin Pop
  while True:
1706 a8083063 Iustin Pop
    max_time = 0
1707 a8083063 Iustin Pop
    done = True
1708 a8083063 Iustin Pop
    cumul_degraded = False
1709 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1710 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
1711 3efa9051 Iustin Pop
    if msg:
1712 3efa9051 Iustin Pop
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
1713 a8083063 Iustin Pop
      retries += 1
1714 a8083063 Iustin Pop
      if retries >= 10:
1715 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1716 3ecf6786 Iustin Pop
                                 " aborting." % node)
1717 a8083063 Iustin Pop
      time.sleep(6)
1718 a8083063 Iustin Pop
      continue
1719 3efa9051 Iustin Pop
    rstats = rstats.payload
1720 a8083063 Iustin Pop
    retries = 0
1721 1492cca7 Iustin Pop
    for i, mstat in enumerate(rstats):
1722 a8083063 Iustin Pop
      if mstat is None:
1723 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
1724 86d9d3bb Iustin Pop
                           node, instance.disks[i].iv_name)
1725 a8083063 Iustin Pop
        continue
1726 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1727 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1728 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1729 a8083063 Iustin Pop
      if perc_done is not None:
1730 a8083063 Iustin Pop
        done = False
1731 a8083063 Iustin Pop
        if est_time is not None:
1732 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1733 a8083063 Iustin Pop
          max_time = est_time
1734 a8083063 Iustin Pop
        else:
1735 a8083063 Iustin Pop
          rem_time = "no time estimate"
1736 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1737 b9bddb6b Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
1738 fbafd7a8 Iustin Pop
1739 fbafd7a8 Iustin Pop
    # if we're done but degraded, let's do a few small retries, to
1740 fbafd7a8 Iustin Pop
    # make sure we see a stable and not transient situation; therefore
1741 fbafd7a8 Iustin Pop
    # we force restart of the loop
1742 fbafd7a8 Iustin Pop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
1743 fbafd7a8 Iustin Pop
      logging.info("Degraded disks found, %d retries left", degr_retries)
1744 fbafd7a8 Iustin Pop
      degr_retries -= 1
1745 fbafd7a8 Iustin Pop
      time.sleep(1)
1746 fbafd7a8 Iustin Pop
      continue
1747 fbafd7a8 Iustin Pop
1748 a8083063 Iustin Pop
    if done or oneshot:
1749 a8083063 Iustin Pop
      break
1750 a8083063 Iustin Pop
1751 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1752 a8083063 Iustin Pop
1753 a8083063 Iustin Pop
  if done:
1754 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1755 a8083063 Iustin Pop
  return not cumul_degraded
1756 a8083063 Iustin Pop
1757 a8083063 Iustin Pop
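# Editorial note (data shape inferred from the unpacking in _WaitForSync
# above): each entry of the blockdev_getmirrorstatus payload is treated as
#
#   (perc_done, est_time, is_degraded, ldisk)
#   e.g. (78.5, 120, True, False)      # hypothetical: 78.5% synced,
#                                      # ~120s left, still degraded
#
# _WaitForSync keeps polling while any device reports a completion
# percentage, and retries a few extra times when the sync looks finished
# but a mirror is still flagged as degraded.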
1758 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1759 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1760 a8083063 Iustin Pop

1761 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1762 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1763 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1764 0834c866 Iustin Pop

1765 a8083063 Iustin Pop
  """
1766 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1767 0834c866 Iustin Pop
  if ldisk:
1768 0834c866 Iustin Pop
    idx = 6
1769 0834c866 Iustin Pop
  else:
1770 0834c866 Iustin Pop
    idx = 5
1771 a8083063 Iustin Pop
1772 a8083063 Iustin Pop
  result = True
1773 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1774 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1775 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
1776 23829f6f Iustin Pop
    if msg:
1777 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
1778 23829f6f Iustin Pop
      result = False
1779 23829f6f Iustin Pop
    elif not rstats.payload:
1780 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
1781 a8083063 Iustin Pop
      result = False
1782 a8083063 Iustin Pop
    else:
1783 23829f6f Iustin Pop
      result = result and (not rstats.payload[idx])
1784 a8083063 Iustin Pop
  if dev.children:
1785 a8083063 Iustin Pop
    for child in dev.children:
1786 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1787 a8083063 Iustin Pop
1788 a8083063 Iustin Pop
  return result
1789 a8083063 Iustin Pop
1790 a8083063 Iustin Pop
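# Editorial sketch (not part of the original module): a minimal helper
# showing how _CheckDiskConsistency above can be applied to every disk of
# an instance on a given node; 'lu' and 'instance' are assumed to be a
# LogicalUnit and an objects.Instance as elsewhere in this file.
def _ExampleAllDisksConsistent(lu, instance, node, on_primary, ldisk=False):
  """Illustrative only: True if all the instance's disks look healthy."""
  for dev in instance.disks:
    if not _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=ldisk):
      return False
  return True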
1791 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1792 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1793 a8083063 Iustin Pop

1794 a8083063 Iustin Pop
  """
1795 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1796 6bf01bbb Guido Trotter
  REQ_BGL = False
1797 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
1798 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")
1799 a8083063 Iustin Pop
1800 6bf01bbb Guido Trotter
  def ExpandNames(self):
1801 1f9430d6 Iustin Pop
    if self.op.names:
1802 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1803 1f9430d6 Iustin Pop
1804 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1805 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1806 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1807 1f9430d6 Iustin Pop
1808 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
1809 a6ab004b Iustin Pop
    # Temporary removal of locks, should be reverted later
1810 a6ab004b Iustin Pop
    # TODO: reintroduce locks when they are lighter-weight
1811 6bf01bbb Guido Trotter
    self.needed_locks = {}
1812 a6ab004b Iustin Pop
    #self.share_locks[locking.LEVEL_NODE] = 1
1813 a6ab004b Iustin Pop
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1814 6bf01bbb Guido Trotter
1815 6bf01bbb Guido Trotter
  def CheckPrereq(self):
1816 6bf01bbb Guido Trotter
    """Check prerequisites.
1817 6bf01bbb Guido Trotter

1818 6bf01bbb Guido Trotter
    """
1819 6bf01bbb Guido Trotter
1820 1f9430d6 Iustin Pop
  @staticmethod
1821 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1822 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1823 1f9430d6 Iustin Pop

1824 e4376078 Iustin Pop
    @param node_list: a list with the names of all nodes
1825 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
1826 1f9430d6 Iustin Pop

1827 e4376078 Iustin Pop
    @rtype: dict
1828 5fcc718f Iustin Pop
    @return: a dictionary with osnames as keys and as value another map, with
1829 255dcebd Iustin Pop
        nodes as keys and tuples of (path, status, diagnose) as values, eg::
1830 e4376078 Iustin Pop

1831 255dcebd Iustin Pop
          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
1832 255dcebd Iustin Pop
                                     (/srv/..., False, "invalid api")],
1833 255dcebd Iustin Pop
                           "node2": [(/srv/..., True, "")]}
1834 e4376078 Iustin Pop
          }
1835 1f9430d6 Iustin Pop

1836 1f9430d6 Iustin Pop
    """
1837 1f9430d6 Iustin Pop
    all_os = {}
1838 a6ab004b Iustin Pop
    # we build here the list of nodes that didn't fail the RPC (at RPC
1839 a6ab004b Iustin Pop
    # level), so that nodes with a non-responding node daemon don't
1840 a6ab004b Iustin Pop
    # make all OSes invalid
1841 a6ab004b Iustin Pop
    good_nodes = [node_name for node_name in rlist
1842 4c4e4e1e Iustin Pop
                  if not rlist[node_name].fail_msg]
1843 83d92ad8 Iustin Pop
    for node_name, nr in rlist.items():
1844 4c4e4e1e Iustin Pop
      if nr.fail_msg or not nr.payload:
1845 1f9430d6 Iustin Pop
        continue
1846 255dcebd Iustin Pop
      for name, path, status, diagnose in nr.payload:
1847 255dcebd Iustin Pop
        if name not in all_os:
1848 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1849 1f9430d6 Iustin Pop
          # for each node in node_list
1850 255dcebd Iustin Pop
          all_os[name] = {}
1851 a6ab004b Iustin Pop
          for nname in good_nodes:
1852 255dcebd Iustin Pop
            all_os[name][nname] = []
1853 255dcebd Iustin Pop
        all_os[name][node_name].append((path, status, diagnose))
1854 1f9430d6 Iustin Pop
    return all_os
1855 a8083063 Iustin Pop
1856 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1857 a8083063 Iustin Pop
    """Compute the list of OSes.
1858 a8083063 Iustin Pop

1859 a8083063 Iustin Pop
    """
1860 a6ab004b Iustin Pop
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
1861 94a02bb5 Iustin Pop
    node_data = self.rpc.call_os_diagnose(valid_nodes)
1862 94a02bb5 Iustin Pop
    pol = self._DiagnoseByOS(valid_nodes, node_data)
1863 1f9430d6 Iustin Pop
    output = []
1864 83d92ad8 Iustin Pop
    for os_name, os_data in pol.items():
1865 1f9430d6 Iustin Pop
      row = []
1866 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1867 1f9430d6 Iustin Pop
        if field == "name":
1868 1f9430d6 Iustin Pop
          val = os_name
1869 1f9430d6 Iustin Pop
        elif field == "valid":
1870 255dcebd Iustin Pop
          val = utils.all([osl and osl[0][1] for osl in os_data.values()])
1871 1f9430d6 Iustin Pop
        elif field == "node_status":
1872 255dcebd Iustin Pop
          # this is just a copy of the dict
1873 1f9430d6 Iustin Pop
          val = {}
1874 255dcebd Iustin Pop
          for node_name, nos_list in os_data.items():
1875 255dcebd Iustin Pop
            val[node_name] = nos_list
1876 1f9430d6 Iustin Pop
        else:
1877 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1878 1f9430d6 Iustin Pop
        row.append(val)
1879 1f9430d6 Iustin Pop
      output.append(row)
1880 1f9430d6 Iustin Pop
1881 1f9430d6 Iustin Pop
    return output
1882 a8083063 Iustin Pop
1883 a8083063 Iustin Pop
1884 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1885 a8083063 Iustin Pop
  """Logical unit for removing a node.
1886 a8083063 Iustin Pop

1887 a8083063 Iustin Pop
  """
1888 a8083063 Iustin Pop
  HPATH = "node-remove"
1889 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1890 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1891 a8083063 Iustin Pop
1892 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1893 a8083063 Iustin Pop
    """Build hooks env.
1894 a8083063 Iustin Pop

1895 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1896 d08869ee Guido Trotter
    node would then be impossible to remove.
1897 a8083063 Iustin Pop

1898 a8083063 Iustin Pop
    """
1899 396e1b78 Michael Hanselmann
    env = {
1900 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1901 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1902 396e1b78 Michael Hanselmann
      }
1903 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1904 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1905 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1906 a8083063 Iustin Pop
1907 a8083063 Iustin Pop
  def CheckPrereq(self):
1908 a8083063 Iustin Pop
    """Check prerequisites.
1909 a8083063 Iustin Pop

1910 a8083063 Iustin Pop
    This checks:
1911 a8083063 Iustin Pop
     - the node exists in the configuration
1912 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1913 a8083063 Iustin Pop
     - it's not the master
1914 a8083063 Iustin Pop

1915 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1916 a8083063 Iustin Pop

1917 a8083063 Iustin Pop
    """
1918 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1919 a8083063 Iustin Pop
    if node is None:
1920 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1921 a8083063 Iustin Pop
1922 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1923 a8083063 Iustin Pop
1924 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
1925 a8083063 Iustin Pop
    if node.name == masternode:
1926 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1927 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1928 a8083063 Iustin Pop
1929 a8083063 Iustin Pop
    for instance_name in instance_list:
1930 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1931 6b12959c Iustin Pop
      if node.name in instance.all_nodes:
1932 6b12959c Iustin Pop
        raise errors.OpPrereqError("Instance %s is still running on the node,"
1933 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1934 a8083063 Iustin Pop
    self.op.node_name = node.name
1935 a8083063 Iustin Pop
    self.node = node
1936 a8083063 Iustin Pop
1937 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1938 a8083063 Iustin Pop
    """Removes the node from the cluster.
1939 a8083063 Iustin Pop

1940 a8083063 Iustin Pop
    """
1941 a8083063 Iustin Pop
    node = self.node
1942 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
1943 9a4f63d1 Iustin Pop
                 node.name)
1944 a8083063 Iustin Pop
1945 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
1946 a8083063 Iustin Pop
1947 0623d351 Iustin Pop
    result = self.rpc.call_node_leave_cluster(node.name)
1948 4c4e4e1e Iustin Pop
    msg = result.fail_msg
1949 0623d351 Iustin Pop
    if msg:
1950 0623d351 Iustin Pop
      self.LogWarning("Errors encountered on the remote node while leaving"
1951 0623d351 Iustin Pop
                      " the cluster: %s", msg)
1952 c8a0948f Michael Hanselmann
1953 eb1742d5 Guido Trotter
    # Promote nodes to master candidate as needed
1954 ec0292f1 Iustin Pop
    _AdjustCandidatePool(self)
1955 eb1742d5 Guido Trotter
1956 a8083063 Iustin Pop
1957 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1958 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1959 a8083063 Iustin Pop

1960 a8083063 Iustin Pop
  """
1961 bc8e4a1a Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
1962 35705d8f Guido Trotter
  REQ_BGL = False
1963 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
1964 31bf511f Iustin Pop
    "dtotal", "dfree",
1965 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
1966 31bf511f Iustin Pop
    "bootid",
1967 0105bad3 Iustin Pop
    "ctotal", "cnodes", "csockets",
1968 31bf511f Iustin Pop
    )
1969 31bf511f Iustin Pop
1970 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(
1971 31bf511f Iustin Pop
    "name", "pinst_cnt", "sinst_cnt",
1972 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
1973 31bf511f Iustin Pop
    "pip", "sip", "tags",
1974 31bf511f Iustin Pop
    "serial_no",
1975 0e67cdbe Iustin Pop
    "master_candidate",
1976 0e67cdbe Iustin Pop
    "master",
1977 9ddb5e45 Iustin Pop
    "offline",
1978 0b2454b9 Iustin Pop
    "drained",
1979 31bf511f Iustin Pop
    )
1980 a8083063 Iustin Pop
1981 35705d8f Guido Trotter
  def ExpandNames(self):
1982 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1983 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1984 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1985 a8083063 Iustin Pop
1986 35705d8f Guido Trotter
    self.needed_locks = {}
1987 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1988 c8d8b4c8 Iustin Pop
1989 c8d8b4c8 Iustin Pop
    if self.op.names:
1990 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
1991 35705d8f Guido Trotter
    else:
1992 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
1993 c8d8b4c8 Iustin Pop
1994 bc8e4a1a Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
1995 bc8e4a1a Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
1996 c8d8b4c8 Iustin Pop
    if self.do_locking:
1997 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
1998 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
1999 c8d8b4c8 Iustin Pop
2000 35705d8f Guido Trotter
2001 35705d8f Guido Trotter
  def CheckPrereq(self):
2002 35705d8f Guido Trotter
    """Check prerequisites.
2003 35705d8f Guido Trotter

2004 35705d8f Guido Trotter
    """
2005 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in _GetWantedNodes if it is
2006 c8d8b4c8 Iustin Pop
    # non-empty; if it is empty, there's no validation to do
2007 c8d8b4c8 Iustin Pop
    pass
2008 a8083063 Iustin Pop
2009 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2010 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2011 a8083063 Iustin Pop

2012 a8083063 Iustin Pop
    """
2013 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
2014 c8d8b4c8 Iustin Pop
    if self.do_locking:
2015 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
2016 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2017 3fa93523 Guido Trotter
      nodenames = self.wanted
2018 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
2019 3fa93523 Guido Trotter
      if missing:
2020 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2021 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
2022 c8d8b4c8 Iustin Pop
    else:
2023 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
2024 c1f1cbb2 Iustin Pop
2025 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
2026 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
2027 a8083063 Iustin Pop
2028 a8083063 Iustin Pop
    # begin data gathering
2029 a8083063 Iustin Pop
2030 bc8e4a1a Iustin Pop
    if self.do_node_query:
2031 a8083063 Iustin Pop
      live_data = {}
2032 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2033 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
2034 a8083063 Iustin Pop
      for name in nodenames:
2035 781de953 Iustin Pop
        nodeinfo = node_data[name]
2036 4c4e4e1e Iustin Pop
        if not nodeinfo.fail_msg and nodeinfo.payload:
2037 070e998b Iustin Pop
          nodeinfo = nodeinfo.payload
2038 d599d686 Iustin Pop
          fn = utils.TryConvert
2039 a8083063 Iustin Pop
          live_data[name] = {
2040 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2041 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2042 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2043 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2044 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2045 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2046 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
2047 0105bad3 Iustin Pop
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2048 0105bad3 Iustin Pop
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2049 a8083063 Iustin Pop
            }
2050 a8083063 Iustin Pop
        else:
2051 a8083063 Iustin Pop
          live_data[name] = {}
2052 a8083063 Iustin Pop
    else:
2053 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
2054 a8083063 Iustin Pop
2055 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
2056 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
2057 a8083063 Iustin Pop
2058 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2059 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
2060 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
2061 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
2062 a8083063 Iustin Pop
2063 ec223efb Iustin Pop
      for instance_name in instancelist:
2064 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
2065 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
2066 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
2067 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
2068 ec223efb Iustin Pop
          if secnode in node_to_secondary:
2069 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
2070 a8083063 Iustin Pop
2071 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
2072 0e67cdbe Iustin Pop
2073 a8083063 Iustin Pop
    # end data gathering
2074 a8083063 Iustin Pop
2075 a8083063 Iustin Pop
    output = []
2076 a8083063 Iustin Pop
    for node in nodelist:
2077 a8083063 Iustin Pop
      node_output = []
2078 a8083063 Iustin Pop
      for field in self.op.output_fields:
2079 a8083063 Iustin Pop
        if field == "name":
2080 a8083063 Iustin Pop
          val = node.name
2081 ec223efb Iustin Pop
        elif field == "pinst_list":
2082 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
2083 ec223efb Iustin Pop
        elif field == "sinst_list":
2084 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
2085 ec223efb Iustin Pop
        elif field == "pinst_cnt":
2086 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
2087 ec223efb Iustin Pop
        elif field == "sinst_cnt":
2088 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
2089 a8083063 Iustin Pop
        elif field == "pip":
2090 a8083063 Iustin Pop
          val = node.primary_ip
2091 a8083063 Iustin Pop
        elif field == "sip":
2092 a8083063 Iustin Pop
          val = node.secondary_ip
2093 130a6a6f Iustin Pop
        elif field == "tags":
2094 130a6a6f Iustin Pop
          val = list(node.GetTags())
2095 38d7239a Iustin Pop
        elif field == "serial_no":
2096 38d7239a Iustin Pop
          val = node.serial_no
2097 0e67cdbe Iustin Pop
        elif field == "master_candidate":
2098 0e67cdbe Iustin Pop
          val = node.master_candidate
2099 0e67cdbe Iustin Pop
        elif field == "master":
2100 0e67cdbe Iustin Pop
          val = node.name == master_node
2101 9ddb5e45 Iustin Pop
        elif field == "offline":
2102 9ddb5e45 Iustin Pop
          val = node.offline
2103 0b2454b9 Iustin Pop
        elif field == "drained":
2104 0b2454b9 Iustin Pop
          val = node.drained
2105 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
2106 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
2107 a8083063 Iustin Pop
        else:
2108 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2109 a8083063 Iustin Pop
        node_output.append(val)
2110 a8083063 Iustin Pop
      output.append(node_output)
2111 a8083063 Iustin Pop
2112 a8083063 Iustin Pop
    return output
2113 a8083063 Iustin Pop
2114 a8083063 Iustin Pop
2115 dcb93971 Michael Hanselmann
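# Example (illustrative sketch; node names and numbers are hypothetical):
# LUQueryNodes.Exec above returns one row per node, with values ordered
# like self.op.output_fields.  Querying output_fields = ["name",
# "pinst_cnt", "mfree"] on a two-node cluster could yield:
#
#   [["node1.example.com", 2, 1024],
#    ["node2.example.com", 0, 2048]]
#
# "mfree" is a dynamic field: it is filled from the node_info RPC and
# becomes None when the node did not report a usable value.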
class LUQueryNodeVolumes(NoHooksLU):
2116 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
2117 dcb93971 Michael Hanselmann

2118 dcb93971 Michael Hanselmann
  """
2119 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
2120 21a15682 Guido Trotter
  REQ_BGL = False
2121 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
2122 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
2123 21a15682 Guido Trotter
2124 21a15682 Guido Trotter
  def ExpandNames(self):
2125 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2126 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2127 21a15682 Guido Trotter
                       selected=self.op.output_fields)
2128 21a15682 Guido Trotter
2129 21a15682 Guido Trotter
    self.needed_locks = {}
2130 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2131 21a15682 Guido Trotter
    if not self.op.nodes:
2132 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2133 21a15682 Guido Trotter
    else:
2134 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
2135 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
2136 dcb93971 Michael Hanselmann
2137 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
2138 dcb93971 Michael Hanselmann
    """Check prerequisites.
2139 dcb93971 Michael Hanselmann

2140 dcb93971 Michael Hanselmann
    Output field validation is done in ExpandNames; here we just read the
    list of locked nodes.
2141 dcb93971 Michael Hanselmann

2142 dcb93971 Michael Hanselmann
    """
2143 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2144 dcb93971 Michael Hanselmann
2145 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
2146 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
2147 dcb93971 Michael Hanselmann

2148 dcb93971 Michael Hanselmann
    """
2149 a7ba5e53 Iustin Pop
    nodenames = self.nodes
2150 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
2151 dcb93971 Michael Hanselmann
2152 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
2153 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
2154 dcb93971 Michael Hanselmann
2155 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
2156 dcb93971 Michael Hanselmann
2157 dcb93971 Michael Hanselmann
    output = []
2158 dcb93971 Michael Hanselmann
    for node in nodenames:
2159 10bfe6cb Iustin Pop
      nresult = volumes[node]
2160 10bfe6cb Iustin Pop
      if nresult.offline:
2161 10bfe6cb Iustin Pop
        continue
2162 4c4e4e1e Iustin Pop
      msg = nresult.fail_msg
2163 10bfe6cb Iustin Pop
      if msg:
2164 10bfe6cb Iustin Pop
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
2165 37d19eb2 Michael Hanselmann
        continue
2166 37d19eb2 Michael Hanselmann
2167 10bfe6cb Iustin Pop
      node_vols = nresult.payload[:]
2168 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
2169 dcb93971 Michael Hanselmann
2170 dcb93971 Michael Hanselmann
      for vol in node_vols:
2171 dcb93971 Michael Hanselmann
        node_output = []
2172 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
2173 dcb93971 Michael Hanselmann
          if field == "node":
2174 dcb93971 Michael Hanselmann
            val = node
2175 dcb93971 Michael Hanselmann
          elif field == "phys":
2176 dcb93971 Michael Hanselmann
            val = vol['dev']
2177 dcb93971 Michael Hanselmann
          elif field == "vg":
2178 dcb93971 Michael Hanselmann
            val = vol['vg']
2179 dcb93971 Michael Hanselmann
          elif field == "name":
2180 dcb93971 Michael Hanselmann
            val = vol['name']
2181 dcb93971 Michael Hanselmann
          elif field == "size":
2182 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
2183 dcb93971 Michael Hanselmann
          elif field == "instance":
2184 dcb93971 Michael Hanselmann
            for inst in ilist:
2185 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
2186 dcb93971 Michael Hanselmann
                continue
2187 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
2188 dcb93971 Michael Hanselmann
                val = inst.name
2189 dcb93971 Michael Hanselmann
                break
2190 dcb93971 Michael Hanselmann
            else:
2191 dcb93971 Michael Hanselmann
              val = '-'
2192 dcb93971 Michael Hanselmann
          else:
2193 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
2194 dcb93971 Michael Hanselmann
          node_output.append(str(val))
2195 dcb93971 Michael Hanselmann
2196 dcb93971 Michael Hanselmann
        output.append(node_output)
2197 dcb93971 Michael Hanselmann
2198 dcb93971 Michael Hanselmann
    return output
2199 dcb93971 Michael Hanselmann
2200 dcb93971 Michael Hanselmann
2201 a8083063 Iustin Pop
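# Example (illustrative sketch; volume, node and instance names are
# hypothetical): with output_fields = ["node", "phys", "name", "size",
# "instance"], every row returned by LUQueryNodeVolumes.Exec above
# describes one logical volume, with all values stringified:
#
#   [["node1.example.com", "/dev/xenvg/disk0", "disk0", "10240",
#     "instance1.example.com"],
#    ["node1.example.com", "/dev/xenvg/scratch", "scratch", "512", "-"]]
#
# A "-" in the instance column marks a volume not used by any instance disk.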
class LUAddNode(LogicalUnit):
2202 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
2203 a8083063 Iustin Pop

2204 a8083063 Iustin Pop
  """
2205 a8083063 Iustin Pop
  HPATH = "node-add"
2206 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2207 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2208 a8083063 Iustin Pop
2209 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2210 a8083063 Iustin Pop
    """Build hooks env.
2211 a8083063 Iustin Pop

2212 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
2213 a8083063 Iustin Pop

2214 a8083063 Iustin Pop
    """
2215 a8083063 Iustin Pop
    env = {
2216 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2217 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
2218 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
2219 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
2220 a8083063 Iustin Pop
      }
2221 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
2222 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
2223 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
2224 a8083063 Iustin Pop
2225 a8083063 Iustin Pop
  def CheckPrereq(self):
2226 a8083063 Iustin Pop
    """Check prerequisites.
2227 a8083063 Iustin Pop

2228 a8083063 Iustin Pop
    This checks:
2229 a8083063 Iustin Pop
     - the new node is not already in the config
2230 a8083063 Iustin Pop
     - it is resolvable
2231 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
2232 a8083063 Iustin Pop

2233 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
2234 a8083063 Iustin Pop

2235 a8083063 Iustin Pop
    """
2236 a8083063 Iustin Pop
    node_name = self.op.node_name
2237 a8083063 Iustin Pop
    cfg = self.cfg
2238 a8083063 Iustin Pop
2239 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
2240 a8083063 Iustin Pop
2241 bcf043c9 Iustin Pop
    node = dns_data.name
2242 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
2243 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
2244 a8083063 Iustin Pop
    if secondary_ip is None:
2245 a8083063 Iustin Pop
      secondary_ip = primary_ip
2246 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
2247 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
2248 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
2249 e7c6e02b Michael Hanselmann
2250 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
2251 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
2252 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
2253 e7c6e02b Michael Hanselmann
                                 node)
2254 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
2255 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2256 a8083063 Iustin Pop
2257 a8083063 Iustin Pop
    for existing_node_name in node_list:
2258 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
2259 e7c6e02b Michael Hanselmann
2260 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
2261 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
2262 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
2263 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
2264 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
2265 e7c6e02b Michael Hanselmann
        continue
2266 e7c6e02b Michael Hanselmann
2267 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
2268 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
2269 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
2270 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
2271 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
2272 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
2273 a8083063 Iustin Pop
2274 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
2275 a8083063 Iustin Pop
    # same as for the master
2276 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2277 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
2278 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
2279 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
2280 a8083063 Iustin Pop
      if master_singlehomed:
2281 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
2282 3ecf6786 Iustin Pop
                                   " new node has one")
2283 a8083063 Iustin Pop
      else:
2284 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
2285 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
2286 a8083063 Iustin Pop
2287 a8083063 Iustin Pop
    # checks reachability
2288 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2289 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
2290 a8083063 Iustin Pop
2291 a8083063 Iustin Pop
    if not newbie_singlehomed:
2292 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
2293 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2294 b15d625f Iustin Pop
                           source=myself.secondary_ip):
2295 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2296 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
2297 a8083063 Iustin Pop
2298 0fff97e9 Guido Trotter
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2299 ec0292f1 Iustin Pop
    mc_now, _ = self.cfg.GetMasterCandidateStats()
2300 ec0292f1 Iustin Pop
    master_candidate = mc_now < cp_size
2301 0fff97e9 Guido Trotter
2302 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
2303 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
2304 0fff97e9 Guido Trotter
                                 secondary_ip=secondary_ip,
2305 fc0fe88c Iustin Pop
                                 master_candidate=master_candidate,
2306 af64c0ea Iustin Pop
                                 offline=False, drained=False)
2307 a8083063 Iustin Pop
2308 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2309 a8083063 Iustin Pop
    """Adds the new node to the cluster.
2310 a8083063 Iustin Pop

2311 a8083063 Iustin Pop
    """
2312 a8083063 Iustin Pop
    new_node = self.new_node
2313 a8083063 Iustin Pop
    node = new_node.name
2314 a8083063 Iustin Pop
2315 a8083063 Iustin Pop
    # check connectivity
2316 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
2317 4c4e4e1e Iustin Pop
    result.Raise("Can't get version information from node %s" % node)
2318 90b54c26 Iustin Pop
    if constants.PROTOCOL_VERSION == result.payload:
2319 90b54c26 Iustin Pop
      logging.info("Communication to node %s fine, sw version %s match",
2320 90b54c26 Iustin Pop
                   node, result.payload)
2321 a8083063 Iustin Pop
    else:
2322 90b54c26 Iustin Pop
      raise errors.OpExecError("Version mismatch master version %s,"
2323 90b54c26 Iustin Pop
                               " node version %s" %
2324 90b54c26 Iustin Pop
                               (constants.PROTOCOL_VERSION, result.payload))
2325 a8083063 Iustin Pop
2326 a8083063 Iustin Pop
    # setup ssh on node
2327 9a4f63d1 Iustin Pop
    logging.info("Copy ssh key to node %s", node)
2328 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2329 a8083063 Iustin Pop
    keyarray = []
2330 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2331 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2332 70d9e3d8 Iustin Pop
                priv_key, pub_key]
2333 a8083063 Iustin Pop
2334 a8083063 Iustin Pop
    for i in keyfiles:
2335 a8083063 Iustin Pop
      f = open(i, 'r')
2336 a8083063 Iustin Pop
      try:
2337 a8083063 Iustin Pop
        keyarray.append(f.read())
2338 a8083063 Iustin Pop
      finally:
2339 a8083063 Iustin Pop
        f.close()
2340 a8083063 Iustin Pop
2341 72737a7f Iustin Pop
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2342 72737a7f Iustin Pop
                                    keyarray[2],
2343 72737a7f Iustin Pop
                                    keyarray[3], keyarray[4], keyarray[5])
2344 4c4e4e1e Iustin Pop
    result.Raise("Cannot transfer ssh keys to the new node")
2345 a8083063 Iustin Pop
2346 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
2347 b86a6bcd Guido Trotter
    if self.cfg.GetClusterInfo().modify_etc_hosts:
2348 b86a6bcd Guido Trotter
      utils.AddHostToEtcHosts(new_node.name)
2349 c8a0948f Michael Hanselmann
2350 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
2351 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
2352 781de953 Iustin Pop
                                                 new_node.secondary_ip)
2353 4c4e4e1e Iustin Pop
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
2354 4c4e4e1e Iustin Pop
                   prereq=True)
2355 c2fc8250 Iustin Pop
      if not result.payload:
2356 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2357 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
2358 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
2359 a8083063 Iustin Pop
2360 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
2361 5c0527ed Guido Trotter
    node_verify_param = {
2362 5c0527ed Guido Trotter
      'nodelist': [node],
2363 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
2364 5c0527ed Guido Trotter
    }
2365 5c0527ed Guido Trotter
2366 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2367 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
2368 5c0527ed Guido Trotter
    for verifier in node_verify_list:
2369 4c4e4e1e Iustin Pop
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
2370 6f68a739 Iustin Pop
      nl_payload = result[verifier].payload['nodelist']
2371 6f68a739 Iustin Pop
      if nl_payload:
2372 6f68a739 Iustin Pop
        for failed in nl_payload:
2373 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
2374 6f68a739 Iustin Pop
                      (verifier, nl_payload[failed]))
2375 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
2376 ff98055b Iustin Pop
2377 d8470559 Michael Hanselmann
    if self.op.readd:
2378 28eddce5 Guido Trotter
      _RedistributeAncillaryFiles(self)
2379 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
2380 d8470559 Michael Hanselmann
    else:
2381 035566e3 Iustin Pop
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
2382 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
2383 a8083063 Iustin Pop
2384 a8083063 Iustin Pop
2385 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
2386 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
2387 b31c8676 Iustin Pop

2388 b31c8676 Iustin Pop
  """
2389 b31c8676 Iustin Pop
  HPATH = "node-modify"
2390 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2391 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
2392 b31c8676 Iustin Pop
  REQ_BGL = False
2393 b31c8676 Iustin Pop
2394 b31c8676 Iustin Pop
  def CheckArguments(self):
2395 b31c8676 Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2396 b31c8676 Iustin Pop
    if node_name is None:
2397 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2398 b31c8676 Iustin Pop
    self.op.node_name = node_name
2399 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'master_candidate')
2400 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'offline')
2401 c9d443ea Iustin Pop
    _CheckBooleanOpField(self.op, 'drained')
2402 c9d443ea Iustin Pop
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
2403 c9d443ea Iustin Pop
    if all_mods.count(None) == 3:
2404 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification")
2405 c9d443ea Iustin Pop
    if all_mods.count(True) > 1:
2406 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Can't set the node into more than one"
2407 c9d443ea Iustin Pop
                                 " state at the same time")
2408 b31c8676 Iustin Pop
2409 b31c8676 Iustin Pop
  def ExpandNames(self):
2410 b31c8676 Iustin Pop
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
2411 b31c8676 Iustin Pop
2412 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
2413 b31c8676 Iustin Pop
    """Build hooks env.
2414 b31c8676 Iustin Pop

2415 b31c8676 Iustin Pop
    This runs on the master node.
2416 b31c8676 Iustin Pop

2417 b31c8676 Iustin Pop
    """
2418 b31c8676 Iustin Pop
    env = {
2419 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
2420 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
2421 3a5ba66a Iustin Pop
      "OFFLINE": str(self.op.offline),
2422 c9d443ea Iustin Pop
      "DRAINED": str(self.op.drained),
2423 b31c8676 Iustin Pop
      }
2424 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
2425 b31c8676 Iustin Pop
          self.op.node_name]
2426 b31c8676 Iustin Pop
    return env, nl, nl
2427 b31c8676 Iustin Pop
2428 b31c8676 Iustin Pop
  def CheckPrereq(self):
2429 b31c8676 Iustin Pop
    """Check prerequisites.
2430 b31c8676 Iustin Pop

2431 b31c8676 Iustin Pop
    This only checks the instance list against the existing names.
2432 b31c8676 Iustin Pop

2433 b31c8676 Iustin Pop
    """
2434 3a5ba66a Iustin Pop
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
2435 b31c8676 Iustin Pop
2436 c9d443ea Iustin Pop
    if ((self.op.master_candidate == False or self.op.offline == True or
2437 c9d443ea Iustin Pop
         self.op.drained == True) and node.master_candidate):
2438 3a5ba66a Iustin Pop
      # we will demote the node from master_candidate
2439 3a26773f Iustin Pop
      if self.op.node_name == self.cfg.GetMasterNode():
2440 3a26773f Iustin Pop
        raise errors.OpPrereqError("The master node has to be a"
2441 c9d443ea Iustin Pop
                                   " master candidate, online and not drained")
2442 3e83dd48 Iustin Pop
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2443 3a5ba66a Iustin Pop
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
2444 3e83dd48 Iustin Pop
      if num_candidates <= cp_size:
2445 3e83dd48 Iustin Pop
        msg = ("Not enough master candidates (desired"
2446 3e83dd48 Iustin Pop
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
2447 3a5ba66a Iustin Pop
        if self.op.force:
2448 3e83dd48 Iustin Pop
          self.LogWarning(msg)
2449 3e83dd48 Iustin Pop
        else:
2450 3e83dd48 Iustin Pop
          raise errors.OpPrereqError(msg)
2451 3e83dd48 Iustin Pop
2452 c9d443ea Iustin Pop
    if (self.op.master_candidate == True and
2453 c9d443ea Iustin Pop
        ((node.offline and not self.op.offline == False) or
2454 c9d443ea Iustin Pop
         (node.drained and not self.op.drained == False))):
2455 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
2456 949bdabe Iustin Pop
                                 " to master_candidate" % node.name)
2457 3a5ba66a Iustin Pop
2458 b31c8676 Iustin Pop
    return
2459 b31c8676 Iustin Pop
2460 b31c8676 Iustin Pop
  def Exec(self, feedback_fn):
2461 b31c8676 Iustin Pop
    """Modifies a node.
2462 b31c8676 Iustin Pop

2463 b31c8676 Iustin Pop
    """
2464 3a5ba66a Iustin Pop
    node = self.node
2465 b31c8676 Iustin Pop
2466 b31c8676 Iustin Pop
    result = []
2467 c9d443ea Iustin Pop
    changed_mc = False
2468 b31c8676 Iustin Pop
2469 3a5ba66a Iustin Pop
    if self.op.offline is not None:
2470 3a5ba66a Iustin Pop
      node.offline = self.op.offline
2471 3a5ba66a Iustin Pop
      result.append(("offline", str(self.op.offline)))
2472 c9d443ea Iustin Pop
      if self.op.offline == True:
2473 c9d443ea Iustin Pop
        if node.master_candidate:
2474 c9d443ea Iustin Pop
          node.master_candidate = False
2475 c9d443ea Iustin Pop
          changed_mc = True
2476 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to offline"))
2477 c9d443ea Iustin Pop
        if node.drained:
2478 c9d443ea Iustin Pop
          node.drained = False
2479 c9d443ea Iustin Pop
          result.append(("drained", "clear drained status due to offline"))
2480 3a5ba66a Iustin Pop
2481 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
2482 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
2483 c9d443ea Iustin Pop
      changed_mc = True
2484 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
2485 56aa9fd5 Iustin Pop
      if self.op.master_candidate == False:
2486 56aa9fd5 Iustin Pop
        rrc = self.rpc.call_node_demote_from_mc(node.name)
2487 4c4e4e1e Iustin Pop
        msg = rrc.fail_msg
2488 0959c824 Iustin Pop
        if msg:
2489 0959c824 Iustin Pop
          self.LogWarning("Node failed to demote itself: %s" % msg)
2490 b31c8676 Iustin Pop
2491 c9d443ea Iustin Pop
    if self.op.drained is not None:
2492 c9d443ea Iustin Pop
      node.drained = self.op.drained
2493 82e12743 Iustin Pop
      result.append(("drained", str(self.op.drained)))
2494 c9d443ea Iustin Pop
      if self.op.drained == True:
2495 c9d443ea Iustin Pop
        if node.master_candidate:
2496 c9d443ea Iustin Pop
          node.master_candidate = False
2497 c9d443ea Iustin Pop
          changed_mc = True
2498 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to drain"))
2499 c9d443ea Iustin Pop
        if node.offline:
2500 c9d443ea Iustin Pop
          node.offline = False
2501 c9d443ea Iustin Pop
          result.append(("offline", "clear offline status due to drain"))
2502 c9d443ea Iustin Pop
2503 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
2504 b31c8676 Iustin Pop
    self.cfg.Update(node)
2505 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
2506 c9d443ea Iustin Pop
    if changed_mc:
2507 3a26773f Iustin Pop
      self.context.ReaddNode(node)
2508 b31c8676 Iustin Pop
2509 b31c8676 Iustin Pop
    return result
2510 b31c8676 Iustin Pop
2511 b31c8676 Iustin Pop
2512 f5118ade Iustin Pop
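# Example (illustrative sketch): LUSetNodeParams.Exec above returns a list
# of (parameter, value) pairs describing the changes applied.  Draining a
# node that is currently a master candidate would produce:
#
#   [("drained", "True"),
#    ("master_candidate", "auto-demotion due to drain")]
#
# CheckArguments only allows one of offline/drained/master_candidate to be
# set to True in a single operation.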
class LUPowercycleNode(NoHooksLU):
2513 f5118ade Iustin Pop
  """Powercycles a node.
2514 f5118ade Iustin Pop

2515 f5118ade Iustin Pop
  """
2516 f5118ade Iustin Pop
  _OP_REQP = ["node_name", "force"]
2517 f5118ade Iustin Pop
  REQ_BGL = False
2518 f5118ade Iustin Pop
2519 f5118ade Iustin Pop
  def CheckArguments(self):
2520 f5118ade Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2521 f5118ade Iustin Pop
    if node_name is None:
2522 f5118ade Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2523 f5118ade Iustin Pop
    self.op.node_name = node_name
2524 f5118ade Iustin Pop
    if node_name == self.cfg.GetMasterNode() and not self.op.force:
2525 f5118ade Iustin Pop
      raise errors.OpPrereqError("The node is the master and the force"
2526 f5118ade Iustin Pop
                                 " parameter was not set")
2527 f5118ade Iustin Pop
2528 f5118ade Iustin Pop
  def ExpandNames(self):
2529 f5118ade Iustin Pop
    """Locking for PowercycleNode.
2530 f5118ade Iustin Pop

2531 f5118ade Iustin Pop
    This is a last-resort option and shouldn't block on other
2532 f5118ade Iustin Pop
    jobs. Therefore, we grab no locks.
2533 f5118ade Iustin Pop

2534 f5118ade Iustin Pop
    """
2535 f5118ade Iustin Pop
    self.needed_locks = {}
2536 f5118ade Iustin Pop
2537 f5118ade Iustin Pop
  def CheckPrereq(self):
2538 f5118ade Iustin Pop
    """Check prerequisites.
2539 f5118ade Iustin Pop

2540 f5118ade Iustin Pop
    This LU has no prereqs.
2541 f5118ade Iustin Pop

2542 f5118ade Iustin Pop
    """
2543 f5118ade Iustin Pop
    pass
2544 f5118ade Iustin Pop
2545 f5118ade Iustin Pop
  def Exec(self, feedback_fn):
2546 f5118ade Iustin Pop
    """Reboots a node.
2547 f5118ade Iustin Pop

2548 f5118ade Iustin Pop
    """
2549 f5118ade Iustin Pop
    result = self.rpc.call_node_powercycle(self.op.node_name,
2550 f5118ade Iustin Pop
                                           self.cfg.GetHypervisorType())
2551 4c4e4e1e Iustin Pop
    result.Raise("Failed to schedule the reboot")
2552 f5118ade Iustin Pop
    return result.payload
2553 f5118ade Iustin Pop
2554 f5118ade Iustin Pop
2555 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
2556 a8083063 Iustin Pop
  """Query cluster configuration.
2557 a8083063 Iustin Pop

2558 a8083063 Iustin Pop
  """
2559 a8083063 Iustin Pop
  _OP_REQP = []
2560 642339cf Guido Trotter
  REQ_BGL = False
2561 642339cf Guido Trotter
2562 642339cf Guido Trotter
  def ExpandNames(self):
2563 642339cf Guido Trotter
    self.needed_locks = {}
2564 a8083063 Iustin Pop
2565 a8083063 Iustin Pop
  def CheckPrereq(self):
2566 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
2567 a8083063 Iustin Pop

2568 a8083063 Iustin Pop
    """
2569 a8083063 Iustin Pop
    pass
2570 a8083063 Iustin Pop
2571 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2572 a8083063 Iustin Pop
    """Return cluster config.
2573 a8083063 Iustin Pop

2574 a8083063 Iustin Pop
    """
2575 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
2576 a8083063 Iustin Pop
    result = {
2577 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
2578 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
2579 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
2580 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
2581 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
2582 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
2583 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
2584 469f88e1 Iustin Pop
      "master": cluster.master_node,
2585 02691904 Alexander Schreiber
      "default_hypervisor": cluster.default_hypervisor,
2586 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
2587 29921401 Iustin Pop
      "hvparams": dict([(hvname, cluster.hvparams[hvname])
2588 29921401 Iustin Pop
                        for hvname in cluster.enabled_hypervisors]),
2589 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
2590 1094acda Guido Trotter
      "nicparams": cluster.nicparams,
2591 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
2592 7a56b411 Guido Trotter
      "master_netdev": cluster.master_netdev,
2593 7a56b411 Guido Trotter
      "volume_group_name": cluster.volume_group_name,
2594 7a56b411 Guido Trotter
      "file_storage_dir": cluster.file_storage_dir,
2595 a8083063 Iustin Pop
      }
2596 a8083063 Iustin Pop
2597 a8083063 Iustin Pop
    return result
2598 a8083063 Iustin Pop
2599 a8083063 Iustin Pop
2600 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
2601 ae5849b5 Michael Hanselmann
  """Return configuration values.
2602 a8083063 Iustin Pop

2603 a8083063 Iustin Pop
  """
2604 a8083063 Iustin Pop
  _OP_REQP = []
2605 642339cf Guido Trotter
  REQ_BGL = False
2606 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
2607 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")
2608 642339cf Guido Trotter
2609 642339cf Guido Trotter
  def ExpandNames(self):
2610 642339cf Guido Trotter
    self.needed_locks = {}
2611 a8083063 Iustin Pop
2612 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2613 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2614 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
2615 ae5849b5 Michael Hanselmann
2616 a8083063 Iustin Pop
  def CheckPrereq(self):
2617 a8083063 Iustin Pop
    """No prerequisites.
2618 a8083063 Iustin Pop

2619 a8083063 Iustin Pop
    """
2620 a8083063 Iustin Pop
    pass
2621 a8083063 Iustin Pop
2622 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2623 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
2624 a8083063 Iustin Pop

2625 a8083063 Iustin Pop
    """
2626 ae5849b5 Michael Hanselmann
    values = []
2627 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
2628 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
2629 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
2630 ae5849b5 Michael Hanselmann
      elif field == "master_node":
2631 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
2632 3ccafd0e Iustin Pop
      elif field == "drain_flag":
2633 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2634 ae5849b5 Michael Hanselmann
      else:
2635 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
2636 3ccafd0e Iustin Pop
      values.append(entry)
2637 ae5849b5 Michael Hanselmann
    return values
2638 a8083063 Iustin Pop
2639 a8083063 Iustin Pop
2640 a8083063 Iustin Pop
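# Example (illustrative sketch; the values shown are hypothetical):
# LUQueryConfigValues.Exec above returns a flat list of values in the same
# order as the requested output_fields:
#
#   output_fields = ["cluster_name", "master_node", "drain_flag"]
#   # result: ["cluster.example.com", "node1.example.com", False]
#
# "drain_flag" simply reflects whether the job queue drain file exists.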
class LUActivateInstanceDisks(NoHooksLU):
2641 a8083063 Iustin Pop
  """Bring up an instance's disks.
2642 a8083063 Iustin Pop

2643 a8083063 Iustin Pop
  """
2644 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2645 f22a8ba3 Guido Trotter
  REQ_BGL = False
2646 f22a8ba3 Guido Trotter
2647 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2648 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2649 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2650 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2651 f22a8ba3 Guido Trotter
2652 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2653 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2654 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2655 a8083063 Iustin Pop
2656 a8083063 Iustin Pop
  def CheckPrereq(self):
2657 a8083063 Iustin Pop
    """Check prerequisites.
2658 a8083063 Iustin Pop

2659 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2660 a8083063 Iustin Pop

2661 a8083063 Iustin Pop
    """
2662 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2663 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2664 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2665 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
2666 a8083063 Iustin Pop
2667 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2668 a8083063 Iustin Pop
    """Activate the disks.
2669 a8083063 Iustin Pop

2670 a8083063 Iustin Pop
    """
2671 b9bddb6b Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2672 a8083063 Iustin Pop
    if not disks_ok:
2673 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
2674 a8083063 Iustin Pop
2675 a8083063 Iustin Pop
    return disks_info
2676 a8083063 Iustin Pop
2677 a8083063 Iustin Pop
2678 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2679 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
2680 a8083063 Iustin Pop

2681 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
2682 a8083063 Iustin Pop

2683 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
2684 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
2685 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
2686 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
2687 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
2688 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
2689 e4376078 Iustin Pop
      won't result in an error return from the function
2690 e4376078 Iustin Pop
  @return: a (disks_ok, device_info) tuple; disks_ok is False if the
2691 e4376078 Iustin Pop
      operation failed, and device_info is a list of
2692 e4376078 Iustin Pop
      (host, instance_visible_name, node_visible_name) tuples with the
      mapping from node devices to instance devices
2693 a8083063 Iustin Pop

2694 a8083063 Iustin Pop
  """
2695 a8083063 Iustin Pop
  device_info = []
2696 a8083063 Iustin Pop
  disks_ok = True
2697 fdbd668d Iustin Pop
  iname = instance.name
2698 fdbd668d Iustin Pop
  # With the two-pass mechanism we try to reduce the window of
2699 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
2700 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
2701 fdbd668d Iustin Pop
2702 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
2703 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
2704 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
2705 fdbd668d Iustin Pop
  # SyncSource, etc.)
2706 fdbd668d Iustin Pop
2707 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
2708 a8083063 Iustin Pop
  for inst_disk in instance.disks:
2709 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2710 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2711 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2712 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2713 53c14ef1 Iustin Pop
      if msg:
2714 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2715 53c14ef1 Iustin Pop
                           " (is_primary=False, pass=1): %s",
2716 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
2717 fdbd668d Iustin Pop
        if not ignore_secondaries:
2718 a8083063 Iustin Pop
          disks_ok = False
2719 fdbd668d Iustin Pop
2720 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
2721 fdbd668d Iustin Pop
2722 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
2723 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
2724 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2725 fdbd668d Iustin Pop
      if node != instance.primary_node:
2726 fdbd668d Iustin Pop
        continue
2727 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2728 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2729 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2730 53c14ef1 Iustin Pop
      if msg:
2731 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2732 53c14ef1 Iustin Pop
                           " (is_primary=True, pass=2): %s",
2733 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
2734 fdbd668d Iustin Pop
        disks_ok = False
2735 1dff8e07 Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name,
2736 1dff8e07 Iustin Pop
                        result.payload))
2737 a8083063 Iustin Pop
2738 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
2739 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
2740 b352ab5b Iustin Pop
  # improving the logical/physical id handling
2741 b352ab5b Iustin Pop
  for disk in instance.disks:
2742 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
2743 b352ab5b Iustin Pop
2744 a8083063 Iustin Pop
  return disks_ok, device_info
2745 a8083063 Iustin Pop
2746 a8083063 Iustin Pop
2747 b9bddb6b Iustin Pop
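# Example (illustrative usage sketch; assumes "lu" is an initialized
# LogicalUnit and "instance" an objects.Instance): _AssembleInstanceDisks
# above is normally driven the way _StartInstanceDisks below does it:
#
#   disks_ok, device_info = _AssembleInstanceDisks(lu, instance)
#   if not disks_ok:
#     _ShutdownInstanceDisks(lu, instance)
#     raise errors.OpExecError("Cannot activate block devices")
#   for node, iv_name, node_dev in device_info:
#     logging.info("Disk %s of %s visible on %s as %s",
#                  iv_name, instance.name, node, node_dev)
#
# Each device_info entry is (primary node, instance-visible disk name,
# node-level device data returned by blockdev_assemble).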
def _StartInstanceDisks(lu, instance, force):
2748 3ecf6786 Iustin Pop
  """Start the disks of an instance.
2749 3ecf6786 Iustin Pop

2750 3ecf6786 Iustin Pop
  """
2751 b9bddb6b Iustin Pop
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
2752 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
2753 fe7b0351 Michael Hanselmann
  if not disks_ok:
2754 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
2755 fe7b0351 Michael Hanselmann
    if force is not None and not force:
2756 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
2757 86d9d3bb Iustin Pop
                         " secondary node,"
2758 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
2759 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
2760 fe7b0351 Michael Hanselmann
2761 fe7b0351 Michael Hanselmann
2762 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
2763 a8083063 Iustin Pop
  """Shutdown an instance's disks.
2764 a8083063 Iustin Pop

2765 a8083063 Iustin Pop
  """
2766 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2767 f22a8ba3 Guido Trotter
  REQ_BGL = False
2768 f22a8ba3 Guido Trotter
2769 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2770 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2771 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2772 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2773 f22a8ba3 Guido Trotter
2774 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2775 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2776 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2777 a8083063 Iustin Pop
2778 a8083063 Iustin Pop
  def CheckPrereq(self):
2779 a8083063 Iustin Pop
    """Check prerequisites.
2780 a8083063 Iustin Pop

2781 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2782 a8083063 Iustin Pop

2783 a8083063 Iustin Pop
    """
2784 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2785 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2786 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2787 a8083063 Iustin Pop
2788 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2789 a8083063 Iustin Pop
    """Deactivate the disks
2790 a8083063 Iustin Pop

2791 a8083063 Iustin Pop
    """
2792 a8083063 Iustin Pop
    instance = self.instance
2793 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
2794 a8083063 Iustin Pop
2795 a8083063 Iustin Pop
2796 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
2797 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
2798 155d6c75 Guido Trotter

2799 155d6c75 Guido Trotter
  This function checks if an instance is running, before calling
2800 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
2801 155d6c75 Guido Trotter

2802 155d6c75 Guido Trotter
  """
2803 aca13712 Iustin Pop
  pnode = instance.primary_node
2804 4c4e4e1e Iustin Pop
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
2805 4c4e4e1e Iustin Pop
  ins_l.Raise("Can't contact node %s" % pnode)
2806 aca13712 Iustin Pop
2807 aca13712 Iustin Pop
  if instance.name in ins_l.payload:
2808 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
2809 155d6c75 Guido Trotter
                             " block devices.")
2810 155d6c75 Guido Trotter
2811 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
2812 a8083063 Iustin Pop
2813 a8083063 Iustin Pop
2814 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2815 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2816 a8083063 Iustin Pop

2817 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2818 a8083063 Iustin Pop

2819 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
2820 a8083063 Iustin Pop
  ignored; otherwise they count as failures.
2821 a8083063 Iustin Pop

2822 a8083063 Iustin Pop
  """
2823 cacfd1fd Iustin Pop
  all_result = True
2824 a8083063 Iustin Pop
  for disk in instance.disks:
2825 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2826 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
2827 781de953 Iustin Pop
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
2828 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2829 cacfd1fd Iustin Pop
      if msg:
2830 cacfd1fd Iustin Pop
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
2831 cacfd1fd Iustin Pop
                      disk.iv_name, node, msg)
2832 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2833 cacfd1fd Iustin Pop
          all_result = False
2834 cacfd1fd Iustin Pop
  return all_result
2835 a8083063 Iustin Pop
2836 a8083063 Iustin Pop
2837 9ca87a96 Iustin Pop
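# Example (illustrative sketch; assumes "lu" and "instance" as above): a
# caller that mainly needs the secondary nodes cleaned up, for example
# before a failover where the primary may already be dead, can ignore
# errors on the primary node:
#
#   if not _ShutdownInstanceDisks(lu, instance, ignore_primary=True):
#     raise errors.OpExecError("Can't shut down the instance's disks")
#
# With ignore_primary left False, any shutdown error, on the primary or on
# a secondary node, makes the function return False.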
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
2838 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2839 d4f16fd9 Iustin Pop

2840 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
2841 d4f16fd9 Iustin Pop
  memory. If the node has less memory than requested, or we cannot get
2842 d4f16fd9 Iustin Pop
  the information from the node, this function raises an OpPrereqError
2843 d4f16fd9 Iustin Pop
  exception.
2844 d4f16fd9 Iustin Pop

2845 b9bddb6b Iustin Pop
  @type lu: L{LogicalUnit}
2846 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2847 e69d05fd Iustin Pop
  @type node: C{str}
2848 e69d05fd Iustin Pop
  @param node: the node to check
2849 e69d05fd Iustin Pop
  @type reason: C{str}
2850 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2851 e69d05fd Iustin Pop
  @type requested: C{int}
2852 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2853 9ca87a96 Iustin Pop
  @type hypervisor_name: C{str}
2854 9ca87a96 Iustin Pop
  @param hypervisor_name: the hypervisor to ask for memory stats
2855 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2856 e69d05fd Iustin Pop
      we cannot check the node
2857 d4f16fd9 Iustin Pop

2858 d4f16fd9 Iustin Pop
  """
2859 9ca87a96 Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
2860 4c4e4e1e Iustin Pop
  nodeinfo[node].Raise("Can't get data from node %s" % node, prereq=True)
2861 070e998b Iustin Pop
  free_mem = nodeinfo[node].payload.get('memory_free', None)
2862 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2863 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2864 070e998b Iustin Pop
                               " was '%s'" % (node, free_mem))
2865 d4f16fd9 Iustin Pop
  if requested > free_mem:
2866 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2867 070e998b Iustin Pop
                               " needed %s MiB, available %s MiB" %
2868 070e998b Iustin Pop
                               (node, reason, requested, free_mem))
2869 d4f16fd9 Iustin Pop
2870 d4f16fd9 Iustin Pop
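# Usage sketch (illustrative, not part of the original code): callers take
# the memory size from the instance's filled beparams; the startup and
# failover LUs below do roughly:
#
#   bep = self.cfg.GetClusterInfo().FillBE(instance)
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)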
2871 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2872 a8083063 Iustin Pop
  """Starts an instance.
2873 a8083063 Iustin Pop

2874 a8083063 Iustin Pop
  """
2875 a8083063 Iustin Pop
  HPATH = "instance-start"
2876 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2877 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2878 e873317a Guido Trotter
  REQ_BGL = False
2879 e873317a Guido Trotter
2880 e873317a Guido Trotter
  def ExpandNames(self):
2881 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2882 a8083063 Iustin Pop
2883 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2884 a8083063 Iustin Pop
    """Build hooks env.
2885 a8083063 Iustin Pop

2886 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2887 a8083063 Iustin Pop

2888 a8083063 Iustin Pop
    """
2889 a8083063 Iustin Pop
    env = {
2890 a8083063 Iustin Pop
      "FORCE": self.op.force,
2891 a8083063 Iustin Pop
      }
2892 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2893 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2894 a8083063 Iustin Pop
    return env, nl, nl
2895 a8083063 Iustin Pop
2896 a8083063 Iustin Pop
  def CheckPrereq(self):
2897 a8083063 Iustin Pop
    """Check prerequisites.
2898 a8083063 Iustin Pop

2899 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2900 a8083063 Iustin Pop

2901 a8083063 Iustin Pop
    """
2902 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2903 e873317a Guido Trotter
    assert self.instance is not None, \
2904 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2905 a8083063 Iustin Pop
2906 d04aaa2f Iustin Pop
    # extra beparams
2907 d04aaa2f Iustin Pop
    self.beparams = getattr(self.op, "beparams", {})
2908 d04aaa2f Iustin Pop
    if self.beparams:
2909 d04aaa2f Iustin Pop
      if not isinstance(self.beparams, dict):
2910 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
2911 d04aaa2f Iustin Pop
                                   " dict" % (type(self.beparams), ))
2912 d04aaa2f Iustin Pop
      # fill the beparams dict
2913 d04aaa2f Iustin Pop
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
2914 d04aaa2f Iustin Pop
      self.op.beparams = self.beparams
2915 d04aaa2f Iustin Pop
2916 d04aaa2f Iustin Pop
    # extra hvparams
2917 d04aaa2f Iustin Pop
    self.hvparams = getattr(self.op, "hvparams", {})
2918 d04aaa2f Iustin Pop
    if self.hvparams:
2919 d04aaa2f Iustin Pop
      if not isinstance(self.hvparams, dict):
2920 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
2921 d04aaa2f Iustin Pop
                                   " dict" % (type(self.hvparams), ))
2922 d04aaa2f Iustin Pop
2923 d04aaa2f Iustin Pop
      # check hypervisor parameter syntax (locally)
2924 d04aaa2f Iustin Pop
      cluster = self.cfg.GetClusterInfo()
2925 d04aaa2f Iustin Pop
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
2926 abe609b2 Guido Trotter
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
2927 d04aaa2f Iustin Pop
                                    instance.hvparams)
2928 d04aaa2f Iustin Pop
      filled_hvp.update(self.hvparams)
2929 d04aaa2f Iustin Pop
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
2930 d04aaa2f Iustin Pop
      hv_type.CheckParameterSyntax(filled_hvp)
2931 d04aaa2f Iustin Pop
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
2932 d04aaa2f Iustin Pop
      self.op.hvparams = self.hvparams
2933 d04aaa2f Iustin Pop
2934 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
2935 7527a8a4 Iustin Pop
2936 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
2937 a8083063 Iustin Pop
    # check bridges existence
2938 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
2939 a8083063 Iustin Pop
2940 f1926756 Guido Trotter
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2941 f1926756 Guido Trotter
                                              instance.name,
2942 f1926756 Guido Trotter
                                              instance.hypervisor)
2943 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
2944 4c4e4e1e Iustin Pop
                      prereq=True)
2945 7ad1af4a Iustin Pop
    if not remote_info.payload: # not running already
2946 f1926756 Guido Trotter
      _CheckNodeFreeMemory(self, instance.primary_node,
2947 f1926756 Guido Trotter
                           "starting instance %s" % instance.name,
2948 f1926756 Guido Trotter
                           bep[constants.BE_MEMORY], instance.hypervisor)
2949 d4f16fd9 Iustin Pop
2950 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2951 a8083063 Iustin Pop
    """Start the instance.
2952 a8083063 Iustin Pop

2953 a8083063 Iustin Pop
    """
2954 a8083063 Iustin Pop
    instance = self.instance
2955 a8083063 Iustin Pop
    force = self.op.force
2956 a8083063 Iustin Pop
2957 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
2958 fe482621 Iustin Pop
2959 a8083063 Iustin Pop
    node_current = instance.primary_node
2960 a8083063 Iustin Pop
2961 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
2962 a8083063 Iustin Pop
2963 d04aaa2f Iustin Pop
    result = self.rpc.call_instance_start(node_current, instance,
2964 d04aaa2f Iustin Pop
                                          self.hvparams, self.beparams)
2965 4c4e4e1e Iustin Pop
    msg = result.fail_msg
2966 dd279568 Iustin Pop
    if msg:
2967 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
2968 dd279568 Iustin Pop
      raise errors.OpExecError("Could not start instance: %s" % msg)
2969 a8083063 Iustin Pop
2970 a8083063 Iustin Pop
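# Illustrative sketch, not part of the original module: the hvparams handling
# in LUStartupInstance.CheckPrereq layers three dictionaries (cluster-level
# defaults, the instance's own values, and the one-off overrides passed in
# the opcode), with later layers taking precedence.  objects.FillDict covers
# the first two layers; with plain dicts the precedence rule looks like this
# hypothetical helper:
def _ExampleLayerParams(cluster_defaults, instance_params, op_overrides):
  """Hypothetical helper showing the parameter precedence used above.

  """
  filled = cluster_defaults.copy()
  filled.update(instance_params)  # per-instance values beat cluster defaults
  filled.update(op_overrides)     # opcode overrides beat both
  return filled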
2971 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2972 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2973 bf6929a2 Alexander Schreiber

2974 bf6929a2 Alexander Schreiber
  """
2975 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2976 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2977 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2978 e873317a Guido Trotter
  REQ_BGL = False
2979 e873317a Guido Trotter
2980 e873317a Guido Trotter
  def ExpandNames(self):
2981 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2982 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2983 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
2984 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2985 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
2986 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2987 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
2988 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2989 bf6929a2 Alexander Schreiber
2990 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2991 bf6929a2 Alexander Schreiber
    """Build hooks env.
2992 bf6929a2 Alexander Schreiber

2993 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2994 bf6929a2 Alexander Schreiber

2995 bf6929a2 Alexander Schreiber
    """
2996 bf6929a2 Alexander Schreiber
    env = {
2997 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2998 2c2690c9 Iustin Pop
      "REBOOT_TYPE": self.op.reboot_type,
2999 bf6929a2 Alexander Schreiber
      }
3000 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3001 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3002 bf6929a2 Alexander Schreiber
    return env, nl, nl
3003 bf6929a2 Alexander Schreiber
3004 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
3005 bf6929a2 Alexander Schreiber
    """Check prerequisites.
3006 bf6929a2 Alexander Schreiber

3007 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
3008 bf6929a2 Alexander Schreiber

3009 bf6929a2 Alexander Schreiber
    """
3010 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3011 e873317a Guido Trotter
    assert self.instance is not None, \
3012 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3013 bf6929a2 Alexander Schreiber
3014 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3015 7527a8a4 Iustin Pop
3016 bf6929a2 Alexander Schreiber
    # check bridges existance
3017 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
3018 bf6929a2 Alexander Schreiber
3019 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
3020 bf6929a2 Alexander Schreiber
    """Reboot the instance.
3021 bf6929a2 Alexander Schreiber

3022 bf6929a2 Alexander Schreiber
    """
3023 bf6929a2 Alexander Schreiber
    instance = self.instance
3024 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
3025 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
3026 bf6929a2 Alexander Schreiber
3027 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
3028 bf6929a2 Alexander Schreiber
3029 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
3030 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
3031 ae48ac32 Iustin Pop
      for disk in instance.disks:
3032 ae48ac32 Iustin Pop
        self.cfg.SetDiskID(disk, node_current)
3033 781de953 Iustin Pop
      result = self.rpc.call_instance_reboot(node_current, instance,
3034 07813a9e Iustin Pop
                                             reboot_type)
3035 4c4e4e1e Iustin Pop
      result.Raise("Could not reboot instance")
3036 bf6929a2 Alexander Schreiber
    else:
3037 1fae010f Iustin Pop
      result = self.rpc.call_instance_shutdown(node_current, instance)
3038 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance for full reboot")
3039 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3040 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
3041 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(node_current, instance, None, None)
3042 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3043 dd279568 Iustin Pop
      if msg:
3044 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3045 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance for"
3046 dd279568 Iustin Pop
                                 " full reboot: %s" % msg)
3047 bf6929a2 Alexander Schreiber
3048 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
3049 bf6929a2 Alexander Schreiber
3050 bf6929a2 Alexander Schreiber
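# Illustrative note (not part of the original code): the three accepted
# reboot types map onto two execution paths in the Exec method above:
#
#   constants.INSTANCE_REBOOT_SOFT -> single instance_reboot RPC on the node
#   constants.INSTANCE_REBOOT_HARD -> single instance_reboot RPC on the node
#   constants.INSTANCE_REBOOT_FULL -> shutdown, disk deactivation/activation
#                                     and start, driven from the master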
3051 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
3052 a8083063 Iustin Pop
  """Shutdown an instance.
3053 a8083063 Iustin Pop

3054 a8083063 Iustin Pop
  """
3055 a8083063 Iustin Pop
  HPATH = "instance-stop"
3056 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3057 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3058 e873317a Guido Trotter
  REQ_BGL = False
3059 e873317a Guido Trotter
3060 e873317a Guido Trotter
  def ExpandNames(self):
3061 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3062 a8083063 Iustin Pop
3063 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3064 a8083063 Iustin Pop
    """Build hooks env.
3065 a8083063 Iustin Pop

3066 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3067 a8083063 Iustin Pop

3068 a8083063 Iustin Pop
    """
3069 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3070 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3071 a8083063 Iustin Pop
    return env, nl, nl
3072 a8083063 Iustin Pop
3073 a8083063 Iustin Pop
  def CheckPrereq(self):
3074 a8083063 Iustin Pop
    """Check prerequisites.
3075 a8083063 Iustin Pop

3076 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3077 a8083063 Iustin Pop

3078 a8083063 Iustin Pop
    """
3079 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3080 e873317a Guido Trotter
    assert self.instance is not None, \
3081 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3082 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3083 a8083063 Iustin Pop
3084 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3085 a8083063 Iustin Pop
    """Shutdown the instance.
3086 a8083063 Iustin Pop

3087 a8083063 Iustin Pop
    """
3088 a8083063 Iustin Pop
    instance = self.instance
3089 a8083063 Iustin Pop
    node_current = instance.primary_node
3090 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
3091 781de953 Iustin Pop
    result = self.rpc.call_instance_shutdown(node_current, instance)
3092 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3093 1fae010f Iustin Pop
    if msg:
3094 1fae010f Iustin Pop
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
3095 a8083063 Iustin Pop
3096 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
3097 a8083063 Iustin Pop
3098 a8083063 Iustin Pop
3099 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
3100 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
3101 fe7b0351 Michael Hanselmann

3102 fe7b0351 Michael Hanselmann
  """
3103 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
3104 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
3105 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
3106 4e0b4d2d Guido Trotter
  REQ_BGL = False
3107 4e0b4d2d Guido Trotter
3108 4e0b4d2d Guido Trotter
  def ExpandNames(self):
3109 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
3110 fe7b0351 Michael Hanselmann
3111 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
3112 fe7b0351 Michael Hanselmann
    """Build hooks env.
3113 fe7b0351 Michael Hanselmann

3114 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
3115 fe7b0351 Michael Hanselmann

3116 fe7b0351 Michael Hanselmann
    """
3117 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3118 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3119 fe7b0351 Michael Hanselmann
    return env, nl, nl
3120 fe7b0351 Michael Hanselmann
3121 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
3122 fe7b0351 Michael Hanselmann
    """Check prerequisites.
3123 fe7b0351 Michael Hanselmann

3124 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
3125 fe7b0351 Michael Hanselmann

3126 fe7b0351 Michael Hanselmann
    """
3127 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3128 4e0b4d2d Guido Trotter
    assert instance is not None, \
3129 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3130 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3131 4e0b4d2d Guido Trotter
3132 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
3133 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
3134 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3135 0d68c45d Iustin Pop
    if instance.admin_up:
3136 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3137 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3138 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3139 72737a7f Iustin Pop
                                              instance.name,
3140 72737a7f Iustin Pop
                                              instance.hypervisor)
3141 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3142 4c4e4e1e Iustin Pop
                      prereq=True)
3143 7ad1af4a Iustin Pop
    if remote_info.payload:
3144 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3145 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
3146 3ecf6786 Iustin Pop
                                  instance.primary_node))
3147 d0834de3 Michael Hanselmann
3148 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
3149 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3150 d0834de3 Michael Hanselmann
      # OS verification
3151 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
3152 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
3153 d0834de3 Michael Hanselmann
      if pnode is None:
3154 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
3155 3ecf6786 Iustin Pop
                                   instance.primary_node)
3156 781de953 Iustin Pop
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
3157 4c4e4e1e Iustin Pop
      result.Raise("OS '%s' not in supported OS list for primary node %s" %
3158 4c4e4e1e Iustin Pop
                   (self.op.os_type, pnode.name), prereq=True)
3159 d0834de3 Michael Hanselmann
3160 fe7b0351 Michael Hanselmann
    self.instance = instance
3161 fe7b0351 Michael Hanselmann
3162 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
3163 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
3164 fe7b0351 Michael Hanselmann

3165 fe7b0351 Michael Hanselmann
    """
3166 fe7b0351 Michael Hanselmann
    inst = self.instance
3167 fe7b0351 Michael Hanselmann
3168 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3169 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
3170 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
3171 97abc79f Iustin Pop
      self.cfg.Update(inst)
3172 d0834de3 Michael Hanselmann
3173 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
3174 fe7b0351 Michael Hanselmann
    try:
3175 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
3176 e557bae9 Guido Trotter
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
3177 4c4e4e1e Iustin Pop
      result.Raise("Could not install OS for instance %s on node %s" %
3178 4c4e4e1e Iustin Pop
                   (inst.name, inst.primary_node))
3179 fe7b0351 Michael Hanselmann
    finally:
3180 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
3181 fe7b0351 Michael Hanselmann
3182 fe7b0351 Michael Hanselmann
3183 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
3184 decd5f45 Iustin Pop
  """Rename an instance.
3185 decd5f45 Iustin Pop

3186 decd5f45 Iustin Pop
  """
3187 decd5f45 Iustin Pop
  HPATH = "instance-rename"
3188 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3189 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
3190 decd5f45 Iustin Pop
3191 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
3192 decd5f45 Iustin Pop
    """Build hooks env.
3193 decd5f45 Iustin Pop

3194 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3195 decd5f45 Iustin Pop

3196 decd5f45 Iustin Pop
    """
3197 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3198 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
3199 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3200 decd5f45 Iustin Pop
    return env, nl, nl
3201 decd5f45 Iustin Pop
3202 decd5f45 Iustin Pop
  def CheckPrereq(self):
3203 decd5f45 Iustin Pop
    """Check prerequisites.
3204 decd5f45 Iustin Pop

3205 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
3206 decd5f45 Iustin Pop

3207 decd5f45 Iustin Pop
    """
3208 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3209 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3210 decd5f45 Iustin Pop
    if instance is None:
3211 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3212 decd5f45 Iustin Pop
                                 self.op.instance_name)
3213 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3214 7527a8a4 Iustin Pop
3215 0d68c45d Iustin Pop
    if instance.admin_up:
3216 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3217 decd5f45 Iustin Pop
                                 self.op.instance_name)
3218 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3219 72737a7f Iustin Pop
                                              instance.name,
3220 72737a7f Iustin Pop
                                              instance.hypervisor)
3221 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3222 4c4e4e1e Iustin Pop
                      prereq=True)
3223 7ad1af4a Iustin Pop
    if remote_info.payload:
3224 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3225 decd5f45 Iustin Pop
                                 (self.op.instance_name,
3226 decd5f45 Iustin Pop
                                  instance.primary_node))
3227 decd5f45 Iustin Pop
    self.instance = instance
3228 decd5f45 Iustin Pop
3229 decd5f45 Iustin Pop
    # new name verification
3230 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
3231 decd5f45 Iustin Pop
3232 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
3233 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
3234 7bde3275 Guido Trotter
    if new_name in instance_list:
3235 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3236 c09f363f Manuel Franceschini
                                 new_name)
3237 7bde3275 Guido Trotter
3238 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
3239 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
3240 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3241 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
3242 decd5f45 Iustin Pop
3243 decd5f45 Iustin Pop
3244 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
3245 decd5f45 Iustin Pop
    """Reinstall the instance.
3246 decd5f45 Iustin Pop

3247 decd5f45 Iustin Pop
    """
3248 decd5f45 Iustin Pop
    inst = self.instance
3249 decd5f45 Iustin Pop
    old_name = inst.name
3250 decd5f45 Iustin Pop
3251 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
3252 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3253 b23c4333 Manuel Franceschini
3254 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
3255 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
3256 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
3257 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
3258 decd5f45 Iustin Pop
3259 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
3260 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
3261 decd5f45 Iustin Pop
3262 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
3263 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3264 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
3265 72737a7f Iustin Pop
                                                     old_file_storage_dir,
3266 72737a7f Iustin Pop
                                                     new_file_storage_dir)
3267 4c4e4e1e Iustin Pop
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
3268 4c4e4e1e Iustin Pop
                   " (but the instance has been renamed in Ganeti)" %
3269 4c4e4e1e Iustin Pop
                   (inst.primary_node, old_file_storage_dir,
3270 4c4e4e1e Iustin Pop
                    new_file_storage_dir))
3271 b23c4333 Manuel Franceschini
3272 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
3273 decd5f45 Iustin Pop
    try:
3274 781de953 Iustin Pop
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
3275 781de953 Iustin Pop
                                                 old_name)
3276 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3277 96841384 Iustin Pop
      if msg:
3278 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
3279 96841384 Iustin Pop
               " (but the instance has been renamed in Ganeti): %s" %
3280 96841384 Iustin Pop
               (inst.name, inst.primary_node, msg))
3281 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
3282 decd5f45 Iustin Pop
    finally:
3283 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
3284 decd5f45 Iustin Pop
3285 decd5f45 Iustin Pop
3286 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
3287 a8083063 Iustin Pop
  """Remove an instance.
3288 a8083063 Iustin Pop

3289 a8083063 Iustin Pop
  """
3290 a8083063 Iustin Pop
  HPATH = "instance-remove"
3291 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3292 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
3293 cf472233 Guido Trotter
  REQ_BGL = False
3294 cf472233 Guido Trotter
3295 cf472233 Guido Trotter
  def ExpandNames(self):
3296 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
3297 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3298 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3299 cf472233 Guido Trotter
3300 cf472233 Guido Trotter
  def DeclareLocks(self, level):
3301 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
3302 cf472233 Guido Trotter
      self._LockInstancesNodes()
3303 a8083063 Iustin Pop
3304 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3305 a8083063 Iustin Pop
    """Build hooks env.
3306 a8083063 Iustin Pop

3307 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3308 a8083063 Iustin Pop

3309 a8083063 Iustin Pop
    """
3310 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3311 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
3312 a8083063 Iustin Pop
    return env, nl, nl
3313 a8083063 Iustin Pop
3314 a8083063 Iustin Pop
  def CheckPrereq(self):
3315 a8083063 Iustin Pop
    """Check prerequisites.
3316 a8083063 Iustin Pop

3317 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3318 a8083063 Iustin Pop

3319 a8083063 Iustin Pop
    """
3320 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3321 cf472233 Guido Trotter
    assert self.instance is not None, \
3322 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3323 a8083063 Iustin Pop
3324 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3325 a8083063 Iustin Pop
    """Remove the instance.
3326 a8083063 Iustin Pop

3327 a8083063 Iustin Pop
    """
3328 a8083063 Iustin Pop
    instance = self.instance
3329 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
3330 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
3331 a8083063 Iustin Pop
3332 781de953 Iustin Pop
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
3333 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3334 1fae010f Iustin Pop
    if msg:
3335 1d67656e Iustin Pop
      if self.op.ignore_failures:
3336 1fae010f Iustin Pop
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
3337 1d67656e Iustin Pop
      else:
3338 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
3339 1fae010f Iustin Pop
                                 " node %s: %s" %
3340 1fae010f Iustin Pop
                                 (instance.name, instance.primary_node, msg))
3341 a8083063 Iustin Pop
3342 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
3343 a8083063 Iustin Pop
3344 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
3345 1d67656e Iustin Pop
      if self.op.ignore_failures:
3346 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
3347 1d67656e Iustin Pop
      else:
3348 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
3349 a8083063 Iustin Pop
3350 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
3351 a8083063 Iustin Pop
3352 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
3353 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
3354 a8083063 Iustin Pop
3355 a8083063 Iustin Pop
3356 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
3357 a8083063 Iustin Pop
  """Logical unit for querying instances.
3358 a8083063 Iustin Pop

3359 a8083063 Iustin Pop
  """
3360 ec79568d Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
3361 7eb9d8f7 Guido Trotter
  REQ_BGL = False
3362 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
3363 5b460366 Iustin Pop
                                    "admin_state",
3364 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
3365 638c6349 Guido Trotter
                                    "nic_mode", "nic_link",
3366 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
3367 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
3368 8aec325c Iustin Pop
                                    r"(disk)\.(size)/([0-9]+)",
3369 8aec325c Iustin Pop
                                    r"(disk)\.(sizes)", "disk_usage",
3370 638c6349 Guido Trotter
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
3371 638c6349 Guido Trotter
                                    r"(nic)\.(bridge)/([0-9]+)",
3372 638c6349 Guido Trotter
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
3373 8aec325c Iustin Pop
                                    r"(disk|nic)\.(count)",
3374 a2d2e1a7 Iustin Pop
                                    "serial_no", "hypervisor", "hvparams",] +
3375 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
3376 a2d2e1a7 Iustin Pop
                                   for name in constants.HVS_PARAMETERS] +
3377 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
3378 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
3379 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
3380 31bf511f Iustin Pop
3381 a8083063 Iustin Pop
3382 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
3383 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3384 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3385 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
3386 a8083063 Iustin Pop
3387 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
3388 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
3389 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
3390 7eb9d8f7 Guido Trotter
3391 57a2fb91 Iustin Pop
    if self.op.names:
3392 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
3393 7eb9d8f7 Guido Trotter
    else:
3394 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
3395 7eb9d8f7 Guido Trotter
3396 ec79568d Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
3397 ec79568d Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
3398 57a2fb91 Iustin Pop
    if self.do_locking:
3399 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
3400 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
3401 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3402 7eb9d8f7 Guido Trotter
3403 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
3404 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
3405 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
3406 7eb9d8f7 Guido Trotter
3407 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
3408 7eb9d8f7 Guido Trotter
    """Check prerequisites.
3409 7eb9d8f7 Guido Trotter

3410 7eb9d8f7 Guido Trotter
    """
3411 57a2fb91 Iustin Pop
    pass
3412 069dcc86 Iustin Pop
3413 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3414 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
3415 a8083063 Iustin Pop

3416 a8083063 Iustin Pop
    """
3417 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
3418 a7f5dc98 Iustin Pop
    if self.wanted == locking.ALL_SET:
3419 a7f5dc98 Iustin Pop
      # caller didn't specify instance names, so ordering is not important
3420 a7f5dc98 Iustin Pop
      if self.do_locking:
3421 a7f5dc98 Iustin Pop
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
3422 a7f5dc98 Iustin Pop
      else:
3423 a7f5dc98 Iustin Pop
        instance_names = all_info.keys()
3424 a7f5dc98 Iustin Pop
      instance_names = utils.NiceSort(instance_names)
3425 57a2fb91 Iustin Pop
    else:
3426 a7f5dc98 Iustin Pop
      # caller did specify names, so we must keep the ordering
3427 a7f5dc98 Iustin Pop
      if self.do_locking:
3428 a7f5dc98 Iustin Pop
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
3429 a7f5dc98 Iustin Pop
      else:
3430 a7f5dc98 Iustin Pop
        tgt_set = all_info.keys()
3431 a7f5dc98 Iustin Pop
      missing = set(self.wanted).difference(tgt_set)
3432 a7f5dc98 Iustin Pop
      if missing:
3433 a7f5dc98 Iustin Pop
        raise errors.OpExecError("Some instances were removed before"
3434 a7f5dc98 Iustin Pop
                                 " retrieving their data: %s" % missing)
3435 a7f5dc98 Iustin Pop
      instance_names = self.wanted
3436 c1f1cbb2 Iustin Pop
3437 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
3438 a8083063 Iustin Pop
3439 a8083063 Iustin Pop
    # begin data gathering
3440 a8083063 Iustin Pop
3441 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
3442 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
3443 a8083063 Iustin Pop
3444 a8083063 Iustin Pop
    bad_nodes = []
3445 cbfc4681 Iustin Pop
    off_nodes = []
3446 ec79568d Iustin Pop
    if self.do_node_query:
3447 a8083063 Iustin Pop
      live_data = {}
3448 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
3449 a8083063 Iustin Pop
      for name in nodes:
3450 a8083063 Iustin Pop
        result = node_data[name]
3451 cbfc4681 Iustin Pop
        if result.offline:
3452 cbfc4681 Iustin Pop
          # offline nodes will be in both lists
3453 cbfc4681 Iustin Pop
          off_nodes.append(name)
3454 4c4e4e1e Iustin Pop
        if result.failed or result.fail_msg:
3455 a8083063 Iustin Pop
          bad_nodes.append(name)
3456 781de953 Iustin Pop
        else:
3457 2fa74ef4 Iustin Pop
          if result.payload:
3458 2fa74ef4 Iustin Pop
            live_data.update(result.payload)
3459 2fa74ef4 Iustin Pop
          # else no instance is alive
3460 a8083063 Iustin Pop
    else:
3461 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
3462 a8083063 Iustin Pop
3463 a8083063 Iustin Pop
    # end data gathering
3464 a8083063 Iustin Pop
3465 5018a335 Iustin Pop
    HVPREFIX = "hv/"
3466 338e51e8 Iustin Pop
    BEPREFIX = "be/"
3467 a8083063 Iustin Pop
    output = []
3468 638c6349 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
3469 a8083063 Iustin Pop
    for instance in instance_list:
3470 a8083063 Iustin Pop
      iout = []
3471 638c6349 Guido Trotter
      i_hv = cluster.FillHV(instance)
3472 638c6349 Guido Trotter
      i_be = cluster.FillBE(instance)
3473 638c6349 Guido Trotter
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
3474 638c6349 Guido Trotter
                                 nic.nicparams) for nic in instance.nics]
3475 a8083063 Iustin Pop
      for field in self.op.output_fields:
3476 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
3477 a8083063 Iustin Pop
        if field == "name":
3478 a8083063 Iustin Pop
          val = instance.name
3479 a8083063 Iustin Pop
        elif field == "os":
3480 a8083063 Iustin Pop
          val = instance.os
3481 a8083063 Iustin Pop
        elif field == "pnode":
3482 a8083063 Iustin Pop
          val = instance.primary_node
3483 a8083063 Iustin Pop
        elif field == "snodes":
3484 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
3485 a8083063 Iustin Pop
        elif field == "admin_state":
3486 0d68c45d Iustin Pop
          val = instance.admin_up
3487 a8083063 Iustin Pop
        elif field == "oper_state":
3488 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
3489 8a23d2d3 Iustin Pop
            val = None
3490 a8083063 Iustin Pop
          else:
3491 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
3492 d8052456 Iustin Pop
        elif field == "status":
3493 cbfc4681 Iustin Pop
          if instance.primary_node in off_nodes:
3494 cbfc4681 Iustin Pop
            val = "ERROR_nodeoffline"
3495 cbfc4681 Iustin Pop
          elif instance.primary_node in bad_nodes:
3496 d8052456 Iustin Pop
            val = "ERROR_nodedown"
3497 d8052456 Iustin Pop
          else:
3498 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
3499 d8052456 Iustin Pop
            if running:
3500 0d68c45d Iustin Pop
              if instance.admin_up:
3501 d8052456 Iustin Pop
                val = "running"
3502 d8052456 Iustin Pop
              else:
3503 d8052456 Iustin Pop
                val = "ERROR_up"
3504 d8052456 Iustin Pop
            else:
3505 0d68c45d Iustin Pop
              if instance.admin_up:
3506 d8052456 Iustin Pop
                val = "ERROR_down"
3507 d8052456 Iustin Pop
              else:
3508 d8052456 Iustin Pop
                val = "ADMIN_down"
3509 a8083063 Iustin Pop
        elif field == "oper_ram":
3510 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
3511 8a23d2d3 Iustin Pop
            val = None
3512 a8083063 Iustin Pop
          elif instance.name in live_data:
3513 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
3514 a8083063 Iustin Pop
          else:
3515 a8083063 Iustin Pop
            val = "-"
3516 a8083063 Iustin Pop
        elif field == "disk_template":
3517 a8083063 Iustin Pop
          val = instance.disk_template
3518 a8083063 Iustin Pop
        elif field == "ip":
3519 39a02558 Guido Trotter
          if instance.nics:
3520 39a02558 Guido Trotter
            val = instance.nics[0].ip
3521 39a02558 Guido Trotter
          else:
3522 39a02558 Guido Trotter
            val = None
3523 638c6349 Guido Trotter
        elif field == "nic_mode":
3524 638c6349 Guido Trotter
          if instance.nics:
3525 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_MODE]
3526 638c6349 Guido Trotter
          else:
3527 638c6349 Guido Trotter
            val = None
3528 638c6349 Guido Trotter
        elif field == "nic_link":
3529 39a02558 Guido Trotter
          if instance.nics:
3530 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
3531 638c6349 Guido Trotter
          else:
3532 638c6349 Guido Trotter
            val = None
3533 638c6349 Guido Trotter
        elif field == "bridge":
3534 638c6349 Guido Trotter
          if (instance.nics and
3535 638c6349 Guido Trotter
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
3536 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
3537 39a02558 Guido Trotter
          else:
3538 39a02558 Guido Trotter
            val = None
3539 a8083063 Iustin Pop
        elif field == "mac":
3540 39a02558 Guido Trotter
          if instance.nics:
3541 39a02558 Guido Trotter
            val = instance.nics[0].mac
3542 39a02558 Guido Trotter
          else:
3543 39a02558 Guido Trotter
            val = None
3544 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
3545 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
3546 ad24e046 Iustin Pop
          try:
3547 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
3548 ad24e046 Iustin Pop
          except errors.OpPrereqError:
3549 8a23d2d3 Iustin Pop
            val = None
3550 024e157f Iustin Pop
        elif field == "disk_usage": # total disk usage per node
3551 024e157f Iustin Pop
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
3552 024e157f Iustin Pop
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
3553 130a6a6f Iustin Pop
        elif field == "tags":
3554 130a6a6f Iustin Pop
          val = list(instance.GetTags())
3555 38d7239a Iustin Pop
        elif field == "serial_no":
3556 38d7239a Iustin Pop
          val = instance.serial_no
3557 5018a335 Iustin Pop
        elif field == "network_port":
3558 5018a335 Iustin Pop
          val = instance.network_port
3559 338e51e8 Iustin Pop
        elif field == "hypervisor":
3560 338e51e8 Iustin Pop
          val = instance.hypervisor
3561 338e51e8 Iustin Pop
        elif field == "hvparams":
3562 338e51e8 Iustin Pop
          val = i_hv
3563 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
3564 5018a335 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
3565 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
3566 338e51e8 Iustin Pop
        elif field == "beparams":
3567 338e51e8 Iustin Pop
          val = i_be
3568 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
3569 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
3570 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
3571 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
3572 71c1af58 Iustin Pop
          # matches a variable list
3573 71c1af58 Iustin Pop
          st_groups = st_match.groups()
3574 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
3575 71c1af58 Iustin Pop
            if st_groups[1] == "count":
3576 71c1af58 Iustin Pop
              val = len(instance.disks)
3577 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
3578 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
3579 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
3580 3e0cea06 Iustin Pop
              try:
3581 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
3582 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
3583 71c1af58 Iustin Pop
                val = None
3584 71c1af58 Iustin Pop
            else:
3585 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
3586 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
3587 71c1af58 Iustin Pop
            if st_groups[1] == "count":
3588 71c1af58 Iustin Pop
              val = len(instance.nics)
3589 41a776da Iustin Pop
            elif st_groups[1] == "macs":
3590 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
3591 41a776da Iustin Pop
            elif st_groups[1] == "ips":
3592 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
3593 638c6349 Guido Trotter
            elif st_groups[1] == "modes":
3594 638c6349 Guido Trotter
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
3595 638c6349 Guido Trotter
            elif st_groups[1] == "links":
3596 638c6349 Guido Trotter
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
3597 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
3598 638c6349 Guido Trotter
              val = []
3599 638c6349 Guido Trotter
              for nicp in i_nicp:
3600 638c6349 Guido Trotter
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3601 638c6349 Guido Trotter
                  val.append(nicp[constants.NIC_LINK])
3602 638c6349 Guido Trotter
                else:
3603 638c6349 Guido Trotter
                  val.append(None)
3604 71c1af58 Iustin Pop
            else:
3605 71c1af58 Iustin Pop
              # index-based item
3606 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
3607 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
3608 71c1af58 Iustin Pop
                val = None
3609 71c1af58 Iustin Pop
              else:
3610 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
3611 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
3612 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
3613 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
3614 638c6349 Guido Trotter
                elif st_groups[1] == "mode":
3615 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_MODE]
3616 638c6349 Guido Trotter
                elif st_groups[1] == "link":
3617 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_LINK]
3618 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
3619 638c6349 Guido Trotter
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
3620 638c6349 Guido Trotter
                  if nic_mode == constants.NIC_MODE_BRIDGED:
3621 638c6349 Guido Trotter
                    val = i_nicp[nic_idx][constants.NIC_LINK]
3622 638c6349 Guido Trotter
                  else:
3623 638c6349 Guido Trotter
                    val = None
3624 71c1af58 Iustin Pop
                else:
3625 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
3626 71c1af58 Iustin Pop
          else:
3627 71c1af58 Iustin Pop
            assert False, "Unhandled variable parameter"
3628 a8083063 Iustin Pop
        else:
3629 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
3630 a8083063 Iustin Pop
        iout.append(val)
3631 a8083063 Iustin Pop
      output.append(iout)
3632 a8083063 Iustin Pop
3633 a8083063 Iustin Pop
    return output
3634 a8083063 Iustin Pop
3635 a8083063 Iustin Pop
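# Illustrative sketch, not part of the original module: output fields such as
# "disk.size/0", "nic.macs" or "nic.count" are matched against the regular
# expressions in LUQueryInstances._FIELDS_STATIC and dispatched on the
# captured groups.  A stripped-down, standalone version of that parsing
# (hypothetical helper, shown only as an example):
def _ExampleParseItemField(field):
  """Splits a field like "nic.mac/0" into (kind, attribute, index).

  """
  m = re.match(r"^(disk|nic)\.(\w+)(/[0-9]+)?$", field)
  if not m:
    return None
  kind, attr, idx = m.groups()
  if idx is not None:
    idx = int(idx[1:])
  return (kind, attr, idx)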
3636 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
3637 a8083063 Iustin Pop
  """Failover an instance.
3638 a8083063 Iustin Pop

3639 a8083063 Iustin Pop
  """
3640 a8083063 Iustin Pop
  HPATH = "instance-failover"
3641 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3642 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
3643 c9e5c064 Guido Trotter
  REQ_BGL = False
3644 c9e5c064 Guido Trotter
3645 c9e5c064 Guido Trotter
  def ExpandNames(self):
3646 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
3647 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3648 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3649 c9e5c064 Guido Trotter
3650 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
3651 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
3652 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
3653 a8083063 Iustin Pop
3654 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3655 a8083063 Iustin Pop
    """Build hooks env.
3656 a8083063 Iustin Pop

3657 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3658 a8083063 Iustin Pop

3659 a8083063 Iustin Pop
    """
3660 a8083063 Iustin Pop
    env = {
3661 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
3662 a8083063 Iustin Pop
      }
3663 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3664 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3665 a8083063 Iustin Pop
    return env, nl, nl
3666 a8083063 Iustin Pop
3667 a8083063 Iustin Pop
  def CheckPrereq(self):
3668 a8083063 Iustin Pop
    """Check prerequisites.
3669 a8083063 Iustin Pop

3670 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3671 a8083063 Iustin Pop

3672 a8083063 Iustin Pop
    """
3673 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3674 c9e5c064 Guido Trotter
    assert self.instance is not None, \
3675 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3676 a8083063 Iustin Pop
3677 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
3678 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3679 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
3680 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
3681 2a710df1 Michael Hanselmann
3682 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
3683 2a710df1 Michael Hanselmann
    if not secondary_nodes:
3684 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
3685 abdf0113 Iustin Pop
                                   "a mirrored disk template")
3686 2a710df1 Michael Hanselmann
3687 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
3688 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, target_node)
3689 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, target_node)
3690 d27776f0 Iustin Pop
    if instance.admin_up:
3691 d27776f0 Iustin Pop
      # check memory requirements on the secondary node
3692 d27776f0 Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
3693 d27776f0 Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
3694 d27776f0 Iustin Pop
                           instance.hypervisor)
3695 d27776f0 Iustin Pop
    else:
3696 d27776f0 Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
3697 d27776f0 Iustin Pop
                   " instance will not be started")
3698 3a7c308e Guido Trotter
3699 a8083063 Iustin Pop
    # check bridge existance
3700 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
3701 a8083063 Iustin Pop
3702 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3703 a8083063 Iustin Pop
    """Failover an instance.
3704 a8083063 Iustin Pop

3705 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
3706 a8083063 Iustin Pop
    starting it on the secondary.
3707 a8083063 Iustin Pop

3708 a8083063 Iustin Pop
    """
3709 a8083063 Iustin Pop
    instance = self.instance
3710 a8083063 Iustin Pop
3711 a8083063 Iustin Pop
    source_node = instance.primary_node
3712 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
3713 a8083063 Iustin Pop
3714 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
3715 a8083063 Iustin Pop
    for dev in instance.disks:
3716 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
3717 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
3718 0d68c45d Iustin Pop
        if instance.admin_up and not self.op.ignore_consistency:
3719 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
3720 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
3721 a8083063 Iustin Pop
3722 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
3723 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
3724 9a4f63d1 Iustin Pop
                 instance.name, source_node)
3725 a8083063 Iustin Pop
3726 781de953 Iustin Pop
    result = self.rpc.call_instance_shutdown(source_node, instance)
3727 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3728 1fae010f Iustin Pop
    if msg:
3729 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
3730 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
3731 1fae010f Iustin Pop
                             " Proceeding anyway. Please make sure node"
3732 1fae010f Iustin Pop
                             " %s is down. Error details: %s",
3733 1fae010f Iustin Pop
                             instance.name, source_node, source_node, msg)
3734 24a40d57 Iustin Pop
      else:
3735 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
3736 1fae010f Iustin Pop
                                 " node %s: %s" %
3737 1fae010f Iustin Pop
                                 (instance.name, source_node, msg))
3738 a8083063 Iustin Pop
3739 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
3740 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
3741 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
3742 a8083063 Iustin Pop
3743 a8083063 Iustin Pop
    instance.primary_node = target_node
3744 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
3745 b6102dab Guido Trotter
    self.cfg.Update(instance)
3746 a8083063 Iustin Pop
3747 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
3748 0d68c45d Iustin Pop
    if instance.admin_up:
3749 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
3750 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
3751 9a4f63d1 Iustin Pop
                   instance.name, target_node)
3752 12a0cfbe Guido Trotter
3753 b9bddb6b Iustin Pop
      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
3754 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
3755 12a0cfbe Guido Trotter
      if not disks_ok:
3756 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3757 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
3758 a8083063 Iustin Pop
3759 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
3760 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
3761 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3762 dd279568 Iustin Pop
      if msg:
3763 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3764 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
3765 dd279568 Iustin Pop
                                 (instance.name, target_node, msg))
3766 a8083063 Iustin Pop
3767 a8083063 Iustin Pop
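# Illustrative note (not part of the original code): the failover sequence in
# LUFailoverInstance.Exec above is, in order: check disk consistency on the
# target node, shut the instance down on the source node, deactivate its
# disks, update instance.primary_node in the configuration, and, if the
# instance is marked up, reactivate the disks and start it on the former
# secondary node.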
3768 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
3769 53c776b5 Iustin Pop
  """Migrate an instance.
3770 53c776b5 Iustin Pop

3771 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
3772 53c776b5 Iustin Pop
  which is done with shutdown.
3773 53c776b5 Iustin Pop

3774 53c776b5 Iustin Pop
  """
3775 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
3776 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3777 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
3778 53c776b5 Iustin Pop
3779 53c776b5 Iustin Pop
  REQ_BGL = False
3780 53c776b5 Iustin Pop
3781 53c776b5 Iustin Pop
  def ExpandNames(self):
3782 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
3783 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
3784 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3785 53c776b5 Iustin Pop
3786 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
3787 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
3788 53c776b5 Iustin Pop
      self._LockInstancesNodes()
3789 53c776b5 Iustin Pop
3790 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
3791 53c776b5 Iustin Pop
    """Build hooks env.
3792 53c776b5 Iustin Pop

3793 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3794 53c776b5 Iustin Pop

3795 53c776b5 Iustin Pop
    """
3796 53c776b5 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3797 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
3798 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
3799 53c776b5 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3800 53c776b5 Iustin Pop
    return env, nl, nl
3801 53c776b5 Iustin Pop
3802 53c776b5 Iustin Pop
  def CheckPrereq(self):
3803 53c776b5 Iustin Pop
    """Check prerequisites.
3804 53c776b5 Iustin Pop

3805 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
3806 53c776b5 Iustin Pop

3807 53c776b5 Iustin Pop
    """
3808 53c776b5 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3809 53c776b5 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3810 53c776b5 Iustin Pop
    if instance is None:
3811 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3812 53c776b5 Iustin Pop
                                 self.op.instance_name)
3813 53c776b5 Iustin Pop
3814 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
3815 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3816 53c776b5 Iustin Pop
                                 " drbd8, cannot migrate.")
3817 53c776b5 Iustin Pop
3818 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
3819 53c776b5 Iustin Pop
    if not secondary_nodes:
3820 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
3821 733a2b6a Iustin Pop
                                      " drbd8 disk template")
3822 53c776b5 Iustin Pop
3823 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
3824 53c776b5 Iustin Pop
3825 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
3826 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
3827 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
3828 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
3829 53c776b5 Iustin Pop
                         instance.hypervisor)
3830 53c776b5 Iustin Pop
3831 53c776b5 Iustin Pop
    # check bridge existence
3832 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
3833 53c776b5 Iustin Pop
3834 53c776b5 Iustin Pop
    if not self.op.cleanup:
3835 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
3836 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
3837 53c776b5 Iustin Pop
                                                 instance)
3838 4c4e4e1e Iustin Pop
      result.Raise("Can't migrate, please use failover", prereq=True)
3839 53c776b5 Iustin Pop
3840 53c776b5 Iustin Pop
    self.instance = instance
3841 53c776b5 Iustin Pop
3842 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
3843 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
3844 53c776b5 Iustin Pop

3845 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
3846 53c776b5 Iustin Pop

3847 53c776b5 Iustin Pop
    """
3848 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
3849 53c776b5 Iustin Pop
    all_done = False
3850 53c776b5 Iustin Pop
    while not all_done:
3851 53c776b5 Iustin Pop
      all_done = True
3852 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
3853 53c776b5 Iustin Pop
                                            self.nodes_ip,
3854 53c776b5 Iustin Pop
                                            self.instance.disks)
3855 53c776b5 Iustin Pop
      min_percent = 100
3856 53c776b5 Iustin Pop
      for node, nres in result.items():
3857 4c4e4e1e Iustin Pop
        nres.Raise("Cannot resync disks on node %s" % node)
3858 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
3859 53c776b5 Iustin Pop
        all_done = all_done and node_done
3860 53c776b5 Iustin Pop
        if node_percent is not None:
3861 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
3862 53c776b5 Iustin Pop
      if not all_done:
3863 53c776b5 Iustin Pop
        if min_percent < 100:
3864 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
3865 53c776b5 Iustin Pop
        time.sleep(2)
3866 53c776b5 Iustin Pop
3867 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
3868 53c776b5 Iustin Pop
    """Demote a node to secondary.
3869 53c776b5 Iustin Pop

3870 53c776b5 Iustin Pop
    """
3871 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
3872 53c776b5 Iustin Pop
3873 53c776b5 Iustin Pop
    for dev in self.instance.disks:
3874 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
3875 53c776b5 Iustin Pop
3876 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
3877 53c776b5 Iustin Pop
                                          self.instance.disks)
3878 4c4e4e1e Iustin Pop
    result.Raise("Cannot change disk to secondary on node %s" % node)
3879 53c776b5 Iustin Pop
3880 53c776b5 Iustin Pop
  def _GoStandalone(self):
3881 53c776b5 Iustin Pop
    """Disconnect from the network.
3882 53c776b5 Iustin Pop

3883 53c776b5 Iustin Pop
    """
3884 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
3885 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
3886 53c776b5 Iustin Pop
                                               self.instance.disks)
3887 53c776b5 Iustin Pop
    for node, nres in result.items():
3888 4c4e4e1e Iustin Pop
      nres.Raise("Cannot disconnect disks node %s" % node)
3889 53c776b5 Iustin Pop
3890 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
3891 53c776b5 Iustin Pop
    """Reconnect to the network.
3892 53c776b5 Iustin Pop

3893 53c776b5 Iustin Pop
    """
3894 53c776b5 Iustin Pop
    if multimaster:
3895 53c776b5 Iustin Pop
      msg = "dual-master"
3896 53c776b5 Iustin Pop
    else:
3897 53c776b5 Iustin Pop
      msg = "single-master"
3898 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
3899 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
3900 53c776b5 Iustin Pop
                                           self.instance.disks,
3901 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
3902 53c776b5 Iustin Pop
    for node, nres in result.items():
3903 4c4e4e1e Iustin Pop
      nres.Raise("Cannot change disks config on node %s" % node)
3904 53c776b5 Iustin Pop
3905 53c776b5 Iustin Pop
  def _ExecCleanup(self):
3906 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
3907 53c776b5 Iustin Pop

3908 53c776b5 Iustin Pop
    The cleanup is done by:
3909 53c776b5 Iustin Pop
      - check that the instance is running only on one node
3910 53c776b5 Iustin Pop
        (and update the config if needed)
3911 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
3912 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
3913 53c776b5 Iustin Pop
      - disconnect from the network
3914 53c776b5 Iustin Pop
      - change disks into single-master mode
3915 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
3916 53c776b5 Iustin Pop

3917 53c776b5 Iustin Pop
    """
3918 53c776b5 Iustin Pop
    instance = self.instance
3919 53c776b5 Iustin Pop
    target_node = self.target_node
3920 53c776b5 Iustin Pop
    source_node = self.source_node
3921 53c776b5 Iustin Pop
3922 53c776b5 Iustin Pop
    # check running on only one node
3923 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
3924 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
3925 53c776b5 Iustin Pop
                     " a bad state)")
3926 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
3927 53c776b5 Iustin Pop
    for node, result in ins_l.items():
3928 4c4e4e1e Iustin Pop
      result.Raise("Can't contact node %s" % node)
3929 53c776b5 Iustin Pop
3930 aca13712 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].payload
3931 aca13712 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].payload
3932 53c776b5 Iustin Pop
3933 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
3934 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
3935 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
3936 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
3937 53c776b5 Iustin Pop
                               " and restart this operation.")
3938 53c776b5 Iustin Pop
3939 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
3940 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
3941 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
3942 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
3943 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
3944 53c776b5 Iustin Pop
3945 53c776b5 Iustin Pop
    if runningon_target:
3946 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
3947 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
3948 53c776b5 Iustin Pop
                       " updating config" % target_node)
3949 53c776b5 Iustin Pop
      instance.primary_node = target_node
3950 53c776b5 Iustin Pop
      self.cfg.Update(instance)
3951 53c776b5 Iustin Pop
      demoted_node = source_node
3952 53c776b5 Iustin Pop
    else:
3953 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
3954 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
3955 53c776b5 Iustin Pop
      demoted_node = target_node
3956 53c776b5 Iustin Pop
3957 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
3958 53c776b5 Iustin Pop
    try:
3959 53c776b5 Iustin Pop
      self._WaitUntilSync()
3960 53c776b5 Iustin Pop
    except errors.OpExecError:
3961 53c776b5 Iustin Pop
      # we ignore here errors, since if the device is standalone, it
3962 53c776b5 Iustin Pop
      # won't be able to sync
3963 53c776b5 Iustin Pop
      pass
3964 53c776b5 Iustin Pop
    self._GoStandalone()
3965 53c776b5 Iustin Pop
    self._GoReconnect(False)
3966 53c776b5 Iustin Pop
    self._WaitUntilSync()
3967 53c776b5 Iustin Pop
3968 53c776b5 Iustin Pop
    self.feedback_fn("* done")
3969 53c776b5 Iustin Pop
3970 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
3971 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
3972 6906a9d8 Guido Trotter

3973 6906a9d8 Guido Trotter
    """
3974 6906a9d8 Guido Trotter
    target_node = self.target_node
3975 6906a9d8 Guido Trotter
    try:
3976 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
3977 6906a9d8 Guido Trotter
      self._GoStandalone()
3978 6906a9d8 Guido Trotter
      self._GoReconnect(False)
3979 6906a9d8 Guido Trotter
      self._WaitUntilSync()
3980 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
3981 6906a9d8 Guido Trotter
      self.LogWarning("Migration failed and I can't reconnect the"
3982 6906a9d8 Guido Trotter
                      " drives: error '%s'\n"
3983 6906a9d8 Guido Trotter
                      "Please look and recover the instance status" %
3984 6906a9d8 Guido Trotter
                      str(err))
3985 6906a9d8 Guido Trotter
3986 6906a9d8 Guido Trotter
  def _AbortMigration(self):
3987 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
3988 6906a9d8 Guido Trotter

3989 6906a9d8 Guido Trotter
    """
3990 6906a9d8 Guido Trotter
    instance = self.instance
3991 6906a9d8 Guido Trotter
    target_node = self.target_node
3992 6906a9d8 Guido Trotter
    migration_info = self.migration_info
3993 6906a9d8 Guido Trotter
3994 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
3995 6906a9d8 Guido Trotter
                                                    instance,
3996 6906a9d8 Guido Trotter
                                                    migration_info,
3997 6906a9d8 Guido Trotter
                                                    False)
3998 4c4e4e1e Iustin Pop
    abort_msg = abort_result.fail_msg
3999 6906a9d8 Guido Trotter
    if abort_msg:
4000 6906a9d8 Guido Trotter
      logging.error("Aborting migration failed on target node %s: %s" %
4001 6906a9d8 Guido Trotter
                    (target_node, abort_msg))
4002 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we still have to try to revert the
4003 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
4004 6906a9d8 Guido Trotter
4005 53c776b5 Iustin Pop
  def _ExecMigration(self):
4006 53c776b5 Iustin Pop
    """Migrate an instance.
4007 53c776b5 Iustin Pop

4008 53c776b5 Iustin Pop
    The migrate is done by:
4009 53c776b5 Iustin Pop
      - change the disks into dual-master mode
4010 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
4011 53c776b5 Iustin Pop
      - migrate the instance
4012 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
4013 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
4014 53c776b5 Iustin Pop
      - change disks into single-master mode
4015 53c776b5 Iustin Pop

4016 53c776b5 Iustin Pop
    """
4017 53c776b5 Iustin Pop
    instance = self.instance
4018 53c776b5 Iustin Pop
    target_node = self.target_node
4019 53c776b5 Iustin Pop
    source_node = self.source_node
4020 53c776b5 Iustin Pop
4021 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
4022 53c776b5 Iustin Pop
    for dev in instance.disks:
4023 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
4024 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
4025 53c776b5 Iustin Pop
                                 " synchronized on target node,"
4026 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
4027 53c776b5 Iustin Pop
4028 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
4029 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
4030 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4031 6906a9d8 Guido Trotter
    if msg:
4032 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
4033 0959c824 Iustin Pop
                 (source_node, msg))
4034 6906a9d8 Guido Trotter
      logging.error(log_err)
4035 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
4036 6906a9d8 Guido Trotter
4037 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
4038 6906a9d8 Guido Trotter
4039 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
4040 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
4041 53c776b5 Iustin Pop
    self._GoStandalone()
4042 53c776b5 Iustin Pop
    self._GoReconnect(True)
4043 53c776b5 Iustin Pop
    self._WaitUntilSync()
4044 53c776b5 Iustin Pop
4045 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
4046 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
4047 6906a9d8 Guido Trotter
                                           instance,
4048 6906a9d8 Guido Trotter
                                           migration_info,
4049 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
4050 6906a9d8 Guido Trotter
4051 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4052 6906a9d8 Guido Trotter
    if msg:
4053 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
4054 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
4055 6906a9d8 Guido Trotter
      self._AbortMigration()
4056 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
4057 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
4058 6906a9d8 Guido Trotter
                               (instance.name, msg))
4059 6906a9d8 Guido Trotter
4060 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
4061 53c776b5 Iustin Pop
    time.sleep(10)
4062 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
4063 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
4064 53c776b5 Iustin Pop
                                            self.op.live)
4065 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4066 53c776b5 Iustin Pop
    if msg:
4067 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
4068 53c776b5 Iustin Pop
                    " disk status: %s", msg)
4069 6906a9d8 Guido Trotter
      self._AbortMigration()
4070 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
4071 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
4072 53c776b5 Iustin Pop
                               (instance.name, msg))
4073 53c776b5 Iustin Pop
    time.sleep(10)
4074 53c776b5 Iustin Pop
4075 53c776b5 Iustin Pop
    instance.primary_node = target_node
4076 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
4077 53c776b5 Iustin Pop
    self.cfg.Update(instance)
4078 53c776b5 Iustin Pop
4079 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
4080 6906a9d8 Guido Trotter
                                              instance,
4081 6906a9d8 Guido Trotter
                                              migration_info,
4082 6906a9d8 Guido Trotter
                                              True)
4083 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4084 6906a9d8 Guido Trotter
    if msg:
4085 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
4086 6906a9d8 Guido Trotter
                    " %s" % msg)
4087 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
4088 6906a9d8 Guido Trotter
                               msg)
4089 6906a9d8 Guido Trotter
4090 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
4091 53c776b5 Iustin Pop
    self._WaitUntilSync()
4092 53c776b5 Iustin Pop
    self._GoStandalone()
4093 53c776b5 Iustin Pop
    self._GoReconnect(False)
4094 53c776b5 Iustin Pop
    self._WaitUntilSync()
4095 53c776b5 Iustin Pop
4096 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4097 53c776b5 Iustin Pop
4098 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
4099 53c776b5 Iustin Pop
    """Perform the migration.
4100 53c776b5 Iustin Pop

4101 53c776b5 Iustin Pop
    """
4102 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
4103 53c776b5 Iustin Pop
4104 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
4105 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
4106 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
4107 53c776b5 Iustin Pop
    self.nodes_ip = {
4108 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
4109 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
4110 53c776b5 Iustin Pop
      }
4111 53c776b5 Iustin Pop
    if self.op.cleanup:
4112 53c776b5 Iustin Pop
      return self._ExecCleanup()
4113 53c776b5 Iustin Pop
    else:
4114 53c776b5 Iustin Pop
      return self._ExecMigration()
4115 53c776b5 Iustin Pop
4116 53c776b5 Iustin Pop
4117 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
4118 428958aa Iustin Pop
                    info, force_open):
4119 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
4120 a8083063 Iustin Pop

4121 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
4122 a8083063 Iustin Pop
  all its children.
4123 a8083063 Iustin Pop

4124 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
4125 a8083063 Iustin Pop

4126 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
4127 428958aa Iustin Pop
  @param node: the node on which to create the device
4128 428958aa Iustin Pop
  @type instance: L{objects.Instance}
4129 428958aa Iustin Pop
  @param instance: the instance which owns the device
4130 428958aa Iustin Pop
  @type device: L{objects.Disk}
4131 428958aa Iustin Pop
  @param device: the device to create
4132 428958aa Iustin Pop
  @type force_create: boolean
4133 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
4134 428958aa Iustin Pop
      will be changed to True whenever we find a device which has
4135 428958aa Iustin Pop
      CreateOnSecondary() attribute
4136 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4137 428958aa Iustin Pop
      (this will be represented as a LVM tag)
4138 428958aa Iustin Pop
  @type force_open: boolean
4139 428958aa Iustin Pop
  @param force_open: this parameter will be passed to the
4140 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4141 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
4142 428958aa Iustin Pop
      the child assembly and the device own Open() execution
4143 428958aa Iustin Pop

4144 a8083063 Iustin Pop
  """
4145 a8083063 Iustin Pop
  if device.CreateOnSecondary():
4146 428958aa Iustin Pop
    force_create = True
4147 796cab27 Iustin Pop
4148 a8083063 Iustin Pop
  if device.children:
4149 a8083063 Iustin Pop
    for child in device.children:
4150 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
4151 428958aa Iustin Pop
                      info, force_open)
4152 a8083063 Iustin Pop
4153 428958aa Iustin Pop
  if not force_create:
4154 796cab27 Iustin Pop
    return
4155 796cab27 Iustin Pop
4156 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
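# Usage note (mirrors the call site in _CreateDisks below, illustrative only):
# _CreateBlockDev above is driven once per disk and node, forcing creation on
# the primary node and letting devices whose CreateOnSecondary() returns True
# propagate force_create down the tree:
#
#   for node in instance.all_nodes:
#     f_create = node == pnode
#     _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)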
4157 de12473a Iustin Pop
4158 de12473a Iustin Pop
4159 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
4160 de12473a Iustin Pop
  """Create a single block device on a given node.
4161 de12473a Iustin Pop

4162 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
4163 de12473a Iustin Pop
  created in advance.
4164 de12473a Iustin Pop

4165 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
4166 de12473a Iustin Pop
  @param node: the node on which to create the device
4167 de12473a Iustin Pop
  @type instance: L{objects.Instance}
4168 de12473a Iustin Pop
  @param instance: the instance which owns the device
4169 de12473a Iustin Pop
  @type device: L{objects.Disk}
4170 de12473a Iustin Pop
  @param device: the device to create
4171 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4172 de12473a Iustin Pop
      (this will be represented as a LVM tag)
4173 de12473a Iustin Pop
  @type force_open: boolean
4174 de12473a Iustin Pop
  @param force_open: this parameter will be passed to the
4175 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4176 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
4177 de12473a Iustin Pop
      the child assembly and the device own Open() execution
4178 de12473a Iustin Pop

4179 de12473a Iustin Pop
  """
4180 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
4181 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
4182 428958aa Iustin Pop
                                       instance.name, force_open, info)
4183 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
4184 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
4185 a8083063 Iustin Pop
  if device.physical_id is None:
4186 0959c824 Iustin Pop
    device.physical_id = result.payload
4187 a8083063 Iustin Pop
4188 a8083063 Iustin Pop
4189 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
4190 923b1523 Iustin Pop
  """Generate a suitable LV name.
4191 923b1523 Iustin Pop

4192 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
4193 923b1523 Iustin Pop

4194 923b1523 Iustin Pop
  """
4195 923b1523 Iustin Pop
  results = []
4196 923b1523 Iustin Pop
  for val in exts:
4197 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
4198 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
4199 923b1523 Iustin Pop
  return results
4200 923b1523 Iustin Pop
4201 923b1523 Iustin Pop
4202 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
4203 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
4204 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
4205 a1f445d3 Iustin Pop

4206 a1f445d3 Iustin Pop
  """
4207 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
4208 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
4209 b9bddb6b Iustin Pop
  shared_secret = lu.cfg.GenerateDRBDSecret()
4210 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
4211 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
4212 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
4213 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
4214 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
4215 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
4216 f9518d38 Iustin Pop
                                      p_minor, s_minor,
4217 f9518d38 Iustin Pop
                                      shared_secret),
4218 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
4219 a1f445d3 Iustin Pop
                          iv_name=iv_name)
4220 a1f445d3 Iustin Pop
  return drbd_dev
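# Shape of the returned object (a sketch derived from the code above, for
# reference only): an LD_DRBD8 disk whose logical_id carries the two nodes,
# the network port, both minors and the shared secret, backed by two LV
# children:
#
#   drbd_dev.logical_id = (primary, secondary, port, p_minor, s_minor,
#                          shared_secret)
#   drbd_dev.children   = [<LD_LV data volume, size=size>,
#                          <LD_LV metadata volume, size=128>]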
4221 a1f445d3 Iustin Pop
4222 7c0d6283 Michael Hanselmann
4223 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
4224 a8083063 Iustin Pop
                          instance_name, primary_node,
4225 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
4226 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
4227 e2a65344 Iustin Pop
                          base_index):
4228 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
4229 a8083063 Iustin Pop

4230 a8083063 Iustin Pop
  """
4231 a8083063 Iustin Pop
  #TODO: compute space requirements
4232 a8083063 Iustin Pop
4233 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
4234 08db7c5c Iustin Pop
  disk_count = len(disk_info)
4235 08db7c5c Iustin Pop
  disks = []
4236 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
4237 08db7c5c Iustin Pop
    pass
4238 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
4239 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
4240 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
4241 923b1523 Iustin Pop
4242 08db7c5c Iustin Pop
    names = _GenerateUniqueNames(lu, [".disk%d" % i
4243 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
4244 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
4245 e2a65344 Iustin Pop
      disk_index = idx + base_index
4246 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
4247 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
4248 6ec66eae Iustin Pop
                              iv_name="disk/%d" % disk_index,
4249 6ec66eae Iustin Pop
                              mode=disk["mode"])
4250 08db7c5c Iustin Pop
      disks.append(disk_dev)
4251 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
4252 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
4253 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
4254 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
4255 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
4256 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
4257 08db7c5c Iustin Pop
4258 e6c1ff2f Iustin Pop
    names = []
4259 e6c1ff2f Iustin Pop
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
4260 e6c1ff2f Iustin Pop
                                               for i in range(disk_count)]):
4261 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_data")
4262 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_meta")
4263 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
4264 112050d9 Iustin Pop
      disk_index = idx + base_index
4265 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
4266 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
4267 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
4268 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
4269 6ec66eae Iustin Pop
      disk_dev.mode = disk["mode"]
4270 08db7c5c Iustin Pop
      disks.append(disk_dev)
4271 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
4272 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
4273 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
4274 0f1a06e3 Manuel Franceschini
4275 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
4276 112050d9 Iustin Pop
      disk_index = idx + base_index
4277 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
4278 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
4279 08db7c5c Iustin Pop
                              logical_id=(file_driver,
4280 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
4281 43e99cff Guido Trotter
                                                         disk_index)),
4282 6ec66eae Iustin Pop
                              mode=disk["mode"])
4283 08db7c5c Iustin Pop
      disks.append(disk_dev)
4284 a8083063 Iustin Pop
  else:
4285 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
4286 a8083063 Iustin Pop
  return disks
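# Illustrative call (an example, not existing code): two plain LVM disks of
# 1024 MB each would yield two LD_LV objects with iv_names "disk/0" and
# "disk/1", each pointing at a freshly generated LV name in the cluster's
# volume group:
#
#   _GenerateDiskTemplate(lu, constants.DT_PLAIN, "inst1.example.com",
#                         "node1.example.com", [],
#                         [{"size": 1024, "mode": constants.DISK_RDWR}] * 2,
#                         None, None, 0)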
4287 a8083063 Iustin Pop
4288 a8083063 Iustin Pop
4289 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
4290 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
4291 3ecf6786 Iustin Pop

4292 3ecf6786 Iustin Pop
  """
4293 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
4294 a0c3fea1 Michael Hanselmann
4295 a0c3fea1 Michael Hanselmann
4296 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
4297 a8083063 Iustin Pop
  """Create all disks for an instance.
4298 a8083063 Iustin Pop

4299 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
4300 a8083063 Iustin Pop

4301 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
4302 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
4303 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
4304 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
4305 e4376078 Iustin Pop
  @raise errors.OpExecError: in case any of the instance's block
4306 e4376078 Iustin Pop
      devices cannot be created
4307 a8083063 Iustin Pop

4308 a8083063 Iustin Pop
  """
4309 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
4310 428958aa Iustin Pop
  pnode = instance.primary_node
4311 a0c3fea1 Michael Hanselmann
4312 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
4313 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4314 428958aa Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
4315 0f1a06e3 Manuel Franceschini
4316 4c4e4e1e Iustin Pop
    result.Raise("Failed to create directory '%s' on"
4317 4c4e4e1e Iustin Pop
                 " node %s: %s" % (file_storage_dir, pnode))
4318 0f1a06e3 Manuel Franceschini
4319 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
4320 24991749 Iustin Pop
  # LUSetInstanceParams
4321 a8083063 Iustin Pop
  for device in instance.disks:
4322 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
4323 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
4324 a8083063 Iustin Pop
    #HARDCODE
4325 428958aa Iustin Pop
    for node in instance.all_nodes:
4326 428958aa Iustin Pop
      f_create = node == pnode
4327 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
4328 a8083063 Iustin Pop
4329 a8083063 Iustin Pop
4330 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
4331 a8083063 Iustin Pop
  """Remove all disks for an instance.
4332 a8083063 Iustin Pop

4333 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
4334 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
4335 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
4336 a8083063 Iustin Pop
  with `_CreateDisks()`).
4337 a8083063 Iustin Pop

4338 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
4339 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
4340 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
4341 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
4342 e4376078 Iustin Pop
  @rtype: boolean
4343 e4376078 Iustin Pop
  @return: the success of the removal
4344 a8083063 Iustin Pop

4345 a8083063 Iustin Pop
  """
4346 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
4347 a8083063 Iustin Pop
4348 e1bc0878 Iustin Pop
  all_result = True
4349 a8083063 Iustin Pop
  for device in instance.disks:
4350 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
4351 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
4352 4c4e4e1e Iustin Pop
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
4353 e1bc0878 Iustin Pop
      if msg:
4354 e1bc0878 Iustin Pop
        lu.LogWarning("Could not remove block device %s on node %s,"
4355 e1bc0878 Iustin Pop
                      " continuing anyway: %s", device.iv_name, node, msg)
4356 e1bc0878 Iustin Pop
        all_result = False
4357 0f1a06e3 Manuel Franceschini
4358 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
4359 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4360 781de953 Iustin Pop
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
4361 781de953 Iustin Pop
                                                 file_storage_dir)
4362 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4363 b2b8bcce Iustin Pop
    if msg:
4364 b2b8bcce Iustin Pop
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
4365 b2b8bcce Iustin Pop
                    file_storage_dir, instance.primary_node, msg)
4366 e1bc0878 Iustin Pop
      all_result = False
4367 0f1a06e3 Manuel Franceschini
4368 e1bc0878 Iustin Pop
  return all_result
4369 a8083063 Iustin Pop
4370 a8083063 Iustin Pop
4371 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
4372 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
4373 e2fe6369 Iustin Pop

4374 e2fe6369 Iustin Pop
  """
4375 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk and swap space
4376 e2fe6369 Iustin Pop
  req_size_dict = {
4377 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
4378 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
4379 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
4380 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
4381 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
4382 e2fe6369 Iustin Pop
  }
4383 e2fe6369 Iustin Pop
4384 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
4385 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
4386 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
4387 e2fe6369 Iustin Pop
4388 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
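# Worked example (illustrative): for DT_DRBD8 with disks of 1024 MB and
# 2048 MB the requirement is (1024 + 128) + (2048 + 128) = 3328 MB, since
# 128 MB of DRBD metadata is added per disk; DT_PLAIN with the same input
# yields 1024 + 2048 = 3072 MB.
#
#   _ComputeDiskSize(constants.DT_DRBD8,
#                    [{"size": 1024}, {"size": 2048}])  # -> 3328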
4389 e2fe6369 Iustin Pop
4390 e2fe6369 Iustin Pop
4391 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
4392 74409b12 Iustin Pop
  """Hypervisor parameter validation.
4393 74409b12 Iustin Pop

4394 74409b12 Iustin Pop
  This function abstract the hypervisor parameter validation to be
4395 74409b12 Iustin Pop
  used in both instance create and instance modify.
4396 74409b12 Iustin Pop

4397 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
4398 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
4399 74409b12 Iustin Pop
  @type nodenames: list
4400 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
4401 74409b12 Iustin Pop
  @type hvname: string
4402 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
4403 74409b12 Iustin Pop
  @type hvparams: dict
4404 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
4405 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
4406 74409b12 Iustin Pop

4407 74409b12 Iustin Pop
  """
4408 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
4409 74409b12 Iustin Pop
                                                  hvname,
4410 74409b12 Iustin Pop
                                                  hvparams)
4411 74409b12 Iustin Pop
  for node in nodenames:
4412 781de953 Iustin Pop
    info = hvinfo[node]
4413 68c6f21c Iustin Pop
    if info.offline:
4414 68c6f21c Iustin Pop
      continue
4415 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
4416 74409b12 Iustin Pop
4417 74409b12 Iustin Pop
4418 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
4419 a8083063 Iustin Pop
  """Create an instance.
4420 a8083063 Iustin Pop

4421 a8083063 Iustin Pop
  """
4422 a8083063 Iustin Pop
  HPATH = "instance-add"
4423 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4424 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
4425 08db7c5c Iustin Pop
              "mode", "start",
4426 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
4427 338e51e8 Iustin Pop
              "hvparams", "beparams"]
4428 7baf741d Guido Trotter
  REQ_BGL = False
4429 7baf741d Guido Trotter
4430 7baf741d Guido Trotter
  def _ExpandNode(self, node):
4431 7baf741d Guido Trotter
    """Expands and checks one node name.
4432 7baf741d Guido Trotter

4433 7baf741d Guido Trotter
    """
4434 7baf741d Guido Trotter
    node_full = self.cfg.ExpandNodeName(node)
4435 7baf741d Guido Trotter
    if node_full is None:
4436 7baf741d Guido Trotter
      raise errors.OpPrereqError("Unknown node %s" % node)
4437 7baf741d Guido Trotter
    return node_full
4438 7baf741d Guido Trotter
4439 7baf741d Guido Trotter
  def ExpandNames(self):
4440 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
4441 7baf741d Guido Trotter

4442 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
4443 7baf741d Guido Trotter

4444 7baf741d Guido Trotter
    """
4445 7baf741d Guido Trotter
    self.needed_locks = {}
4446 7baf741d Guido Trotter
4447 7baf741d Guido Trotter
    # set optional parameters to none if they don't exist
4448 6785674e Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
4449 7baf741d Guido Trotter
      if not hasattr(self.op, attr):
4450 7baf741d Guido Trotter
        setattr(self.op, attr, None)
4451 7baf741d Guido Trotter
4452 4b2f38dd Iustin Pop
    # cheap checks, mostly valid constants given
4453 4b2f38dd Iustin Pop
4454 7baf741d Guido Trotter
    # verify creation mode
4455 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
4456 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
4457 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
4458 7baf741d Guido Trotter
                                 self.op.mode)
4459 4b2f38dd Iustin Pop
4460 7baf741d Guido Trotter
    # disk template and mirror node verification
4461 7baf741d Guido Trotter
    if self.op.disk_template not in constants.DISK_TEMPLATES:
4462 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid disk template name")
4463 7baf741d Guido Trotter
4464 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
4465 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
4466 4b2f38dd Iustin Pop
4467 8705eb96 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
4468 8705eb96 Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
4469 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
4470 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
4471 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
4472 4b2f38dd Iustin Pop
                                  ",".join(enabled_hvs)))
4473 4b2f38dd Iustin Pop
4474 6785674e Iustin Pop
    # check hypervisor parameter syntax (locally)
4475 a5728081 Guido Trotter
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4476 abe609b2 Guido Trotter
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
4477 8705eb96 Iustin Pop
                                  self.op.hvparams)
4478 6785674e Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
4479 8705eb96 Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
4480 67fc3042 Iustin Pop
    self.hv_full = filled_hvp
4481 6785674e Iustin Pop
4482 338e51e8 Iustin Pop
    # fill and remember the beparams dict
4483 a5728081 Guido Trotter
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4484 4ef7f423 Guido Trotter
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
4485 338e51e8 Iustin Pop
                                    self.op.beparams)
4486 338e51e8 Iustin Pop
4487 7baf741d Guido Trotter
    #### instance parameters check
4488 7baf741d Guido Trotter
4489 7baf741d Guido Trotter
    # instance name verification
4490 7baf741d Guido Trotter
    hostname1 = utils.HostInfo(self.op.instance_name)
4491 7baf741d Guido Trotter
    self.op.instance_name = instance_name = hostname1.name
4492 7baf741d Guido Trotter
4493 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
4494 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
4495 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
4496 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4497 7baf741d Guido Trotter
                                 instance_name)
4498 7baf741d Guido Trotter
4499 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
4500 7baf741d Guido Trotter
4501 08db7c5c Iustin Pop
    # NIC buildup
4502 08db7c5c Iustin Pop
    self.nics = []
4503 9dce4771 Guido Trotter
    for idx, nic in enumerate(self.op.nics):
4504 9dce4771 Guido Trotter
      nic_mode_req = nic.get("mode", None)
4505 9dce4771 Guido Trotter
      nic_mode = nic_mode_req
4506 9dce4771 Guido Trotter
      if nic_mode is None:
4507 9dce4771 Guido Trotter
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
4508 9dce4771 Guido Trotter
4509 9dce4771 Guido Trotter
      # in routed mode, for the first nic, the default ip is 'auto'
4510 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
4511 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_AUTO
4512 9dce4771 Guido Trotter
      else:
4513 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_NONE
4514 9dce4771 Guido Trotter
4515 08db7c5c Iustin Pop
      # ip validity checks
4516 9dce4771 Guido Trotter
      ip = nic.get("ip", default_ip_mode)
4517 9dce4771 Guido Trotter
      if ip is None or ip.lower() == constants.VALUE_NONE:
4518 08db7c5c Iustin Pop
        nic_ip = None
4519 08db7c5c Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
4520 08db7c5c Iustin Pop
        nic_ip = hostname1.ip
4521 08db7c5c Iustin Pop
      else:
4522 08db7c5c Iustin Pop
        if not utils.IsValidIP(ip):
4523 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
4524 08db7c5c Iustin Pop
                                     " like a valid IP" % ip)
4525 08db7c5c Iustin Pop
        nic_ip = ip
4526 08db7c5c Iustin Pop
4527 9dce4771 Guido Trotter
      # TODO: check the ip for uniqueness !!
4528 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
4529 9dce4771 Guido Trotter
        raise errors.OpPrereqError("Routed nic mode requires an ip address")
4530 9dce4771 Guido Trotter
4531 08db7c5c Iustin Pop
      # MAC address verification
4532 08db7c5c Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
4533 08db7c5c Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4534 08db7c5c Iustin Pop
        if not utils.IsValidMac(mac.lower()):
4535 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
4536 08db7c5c Iustin Pop
                                     mac)
4537 08db7c5c Iustin Pop
      # bridge verification
4538 9939547b Iustin Pop
      bridge = nic.get("bridge", None)
4539 9dce4771 Guido Trotter
      link = nic.get("link", None)
4540 9dce4771 Guido Trotter
      if bridge and link:
4541 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
4542 29921401 Iustin Pop
                                   " at the same time")
4543 9dce4771 Guido Trotter
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
4544 9dce4771 Guido Trotter
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic")
4545 9dce4771 Guido Trotter
      elif bridge:
4546 9dce4771 Guido Trotter
        link = bridge
4547 9dce4771 Guido Trotter
4548 9dce4771 Guido Trotter
      nicparams = {}
4549 9dce4771 Guido Trotter
      if nic_mode_req:
4550 9dce4771 Guido Trotter
        nicparams[constants.NIC_MODE] = nic_mode_req
4551 9dce4771 Guido Trotter
      if link:
4552 9dce4771 Guido Trotter
        nicparams[constants.NIC_LINK] = link
4553 9dce4771 Guido Trotter
4554 9dce4771 Guido Trotter
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4555 9dce4771 Guido Trotter
                                      nicparams)
4556 9dce4771 Guido Trotter
      objects.NIC.CheckParameterSyntax(check_params)
4557 9dce4771 Guido Trotter
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
4558 08db7c5c Iustin Pop
4559 08db7c5c Iustin Pop
    # disk checks/pre-build
4560 08db7c5c Iustin Pop
    self.disks = []
4561 08db7c5c Iustin Pop
    for disk in self.op.disks:
4562 08db7c5c Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
4563 08db7c5c Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
4564 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
4565 08db7c5c Iustin Pop
                                   mode)
4566 08db7c5c Iustin Pop
      size = disk.get("size", None)
4567 08db7c5c Iustin Pop
      if size is None:
4568 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Missing disk size")
4569 08db7c5c Iustin Pop
      try:
4570 08db7c5c Iustin Pop
        size = int(size)
4571 08db7c5c Iustin Pop
      except ValueError:
4572 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
4573 08db7c5c Iustin Pop
      self.disks.append({"size": size, "mode": mode})
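    # Illustrative input shapes (example values only, not validated beyond
    # the checks above): self.op.nics and self.op.disks are lists of plain
    # dicts, e.g.
    #
    #   nics  = [{"mac": constants.VALUE_AUTO, "ip": None,
    #             "mode": constants.NIC_MODE_BRIDGED, "link": "xen-br0"}]
    #   disks = [{"size": 1024, "mode": constants.DISK_RDWR}]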
4574 08db7c5c Iustin Pop
4575 7baf741d Guido Trotter
    # used in CheckPrereq for ip ping check
4576 7baf741d Guido Trotter
    self.check_ip = hostname1.ip
4577 7baf741d Guido Trotter
4578 7baf741d Guido Trotter
    # file storage checks
4579 7baf741d Guido Trotter
    if (self.op.file_driver and
4580 7baf741d Guido Trotter
        self.op.file_driver not in constants.FILE_DRIVER):
4581 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
4582 7baf741d Guido Trotter
                                 self.op.file_driver)
4583 7baf741d Guido Trotter
4584 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
4585 7baf741d Guido Trotter
      raise errors.OpPrereqError("File storage directory path not absolute")
4586 7baf741d Guido Trotter
4587 7baf741d Guido Trotter
    ### Node/iallocator related checks
4588 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
4589 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
4590 7baf741d Guido Trotter
                                 " node must be given")
4591 7baf741d Guido Trotter
4592 7baf741d Guido Trotter
    if self.op.iallocator:
4593 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4594 7baf741d Guido Trotter
    else:
4595 7baf741d Guido Trotter
      self.op.pnode = self._ExpandNode(self.op.pnode)
4596 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
4597 7baf741d Guido Trotter
      if self.op.snode is not None:
4598 7baf741d Guido Trotter
        self.op.snode = self._ExpandNode(self.op.snode)
4599 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
4600 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
4601 7baf741d Guido Trotter
4602 7baf741d Guido Trotter
    # in case of import lock the source node too
4603 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
4604 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
4605 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
4606 7baf741d Guido Trotter
4607 b9322a9f Guido Trotter
      if src_path is None:
4608 b9322a9f Guido Trotter
        self.op.src_path = src_path = self.op.instance_name
4609 b9322a9f Guido Trotter
4610 b9322a9f Guido Trotter
      if src_node is None:
4611 b9322a9f Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4612 b9322a9f Guido Trotter
        self.op.src_node = None
4613 b9322a9f Guido Trotter
        if os.path.isabs(src_path):
4614 b9322a9f Guido Trotter
          raise errors.OpPrereqError("Importing an instance from an absolute"
4615 b9322a9f Guido Trotter
                                     " path requires a source node option.")
4616 b9322a9f Guido Trotter
      else:
4617 b9322a9f Guido Trotter
        self.op.src_node = src_node = self._ExpandNode(src_node)
4618 b9322a9f Guido Trotter
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4619 b9322a9f Guido Trotter
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
4620 b9322a9f Guido Trotter
        if not os.path.isabs(src_path):
4621 b9322a9f Guido Trotter
          self.op.src_path = src_path = \
4622 b9322a9f Guido Trotter
            os.path.join(constants.EXPORT_DIR, src_path)
4623 7baf741d Guido Trotter
4624 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
4625 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
4626 7baf741d Guido Trotter
        raise errors.OpPrereqError("No guest OS specified")
4627 a8083063 Iustin Pop
4628 538475ca Iustin Pop
  def _RunAllocator(self):
4629 538475ca Iustin Pop
    """Run the allocator based on input opcode.
4630 538475ca Iustin Pop

4631 538475ca Iustin Pop
    """
4632 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
4633 72737a7f Iustin Pop
    ial = IAllocator(self,
4634 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
4635 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
4636 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
4637 d1c2dd75 Iustin Pop
                     tags=[],
4638 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
4639 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
4640 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
4641 08db7c5c Iustin Pop
                     disks=self.disks,
4642 d1c2dd75 Iustin Pop
                     nics=nics,
4643 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
4644 29859cb7 Iustin Pop
                     )
4645 d1c2dd75 Iustin Pop
4646 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
4647 d1c2dd75 Iustin Pop
4648 d1c2dd75 Iustin Pop
    if not ial.success:
4649 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
4650 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
4651 d1c2dd75 Iustin Pop
                                                           ial.info))
4652 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
4653 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4654 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
4655 97abc79f Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
4656 1ce4bbe3 René Nussbaumer
                                  ial.required_nodes))
4657 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
4658 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
4659 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
4660 86d9d3bb Iustin Pop
                 ", ".join(ial.nodes))
4661 27579978 Iustin Pop
    if ial.required_nodes == 2:
4662 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
4663 538475ca Iustin Pop
4664 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4665 a8083063 Iustin Pop
    """Build hooks env.
4666 a8083063 Iustin Pop

4667 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4668 a8083063 Iustin Pop

4669 a8083063 Iustin Pop
    """
4670 a8083063 Iustin Pop
    env = {
4671 2c2690c9 Iustin Pop
      "ADD_MODE": self.op.mode,
4672 a8083063 Iustin Pop
      }
4673 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
4674 2c2690c9 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
4675 2c2690c9 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
4676 2c2690c9 Iustin Pop
      env["SRC_IMAGES"] = self.src_images
4677 396e1b78 Michael Hanselmann
4678 2c2690c9 Iustin Pop
    env.update(_BuildInstanceHookEnv(
4679 2c2690c9 Iustin Pop
      name=self.op.instance_name,
4680 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
4681 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
4682 4978db17 Iustin Pop
      status=self.op.start,
4683 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
4684 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
4685 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
4686 f9b10246 Guido Trotter
      nics=_NICListToTuple(self, self.nics),
4687 2c2690c9 Iustin Pop
      disk_template=self.op.disk_template,
4688 2c2690c9 Iustin Pop
      disks=[(d["size"], d["mode"]) for d in self.disks],
4689 67fc3042 Iustin Pop
      bep=self.be_full,
4690 67fc3042 Iustin Pop
      hvp=self.hv_full,
4691 67fc3042 Iustin Pop
      hypervisor=self.op.hypervisor,
4692 396e1b78 Michael Hanselmann
    ))
4693 a8083063 Iustin Pop
4694 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
4695 a8083063 Iustin Pop
          self.secondaries)
4696 a8083063 Iustin Pop
    return env, nl, nl
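    # For illustration only (hypothetical values): for a plain creation the
    # env built above ends up roughly as
    #   {"ADD_MODE": "create", "INSTANCE_NAME": "inst1.example.com",
    #    "INSTANCE_PRIMARY": "node1.example.com", ...}
    # plus the SRC_* keys for imports; the hooks runner is expected to expose
    # these to the hook scripts with a GANETI_ prefix.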
4697 a8083063 Iustin Pop
4698 a8083063 Iustin Pop
4699 a8083063 Iustin Pop
  def CheckPrereq(self):
4700 a8083063 Iustin Pop
    """Check prerequisites.
4701 a8083063 Iustin Pop

4702 a8083063 Iustin Pop
    """
4703 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
4704 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
4705 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
4706 eedc99de Manuel Franceschini
                                 " instances")
4707 eedc99de Manuel Franceschini
4708 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
4709 7baf741d Guido Trotter
      src_node = self.op.src_node
4710 7baf741d Guido Trotter
      src_path = self.op.src_path
4711 a8083063 Iustin Pop
4712 c0cbdc67 Guido Trotter
      if src_node is None:
4713 1b7bfbb7 Iustin Pop
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
4714 1b7bfbb7 Iustin Pop
        exp_list = self.rpc.call_export_list(locked_nodes)
4715 c0cbdc67 Guido Trotter
        found = False
4716 c0cbdc67 Guido Trotter
        for node in exp_list:
4717 4c4e4e1e Iustin Pop
          if exp_list[node].fail_msg:
4718 1b7bfbb7 Iustin Pop
            continue
4719 1b7bfbb7 Iustin Pop
          if src_path in exp_list[node].payload:
4720 c0cbdc67 Guido Trotter
            found = True
4721 c0cbdc67 Guido Trotter
            self.op.src_node = src_node = node
4722 c0cbdc67 Guido Trotter
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
4723 c0cbdc67 Guido Trotter
                                                       src_path)
4724 c0cbdc67 Guido Trotter
            break
4725 c0cbdc67 Guido Trotter
        if not found:
4726 c0cbdc67 Guido Trotter
          raise errors.OpPrereqError("No export found for relative path %s" %
4727 c0cbdc67 Guido Trotter
                                      src_path)
4728 c0cbdc67 Guido Trotter
4729 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, src_node)
4730 781de953 Iustin Pop
      result = self.rpc.call_export_info(src_node, src_path)
4731 4c4e4e1e Iustin Pop
      result.Raise("No export or invalid export found in dir %s" % src_path)
4732 a8083063 Iustin Pop
4733 3eccac06 Iustin Pop
      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
4734 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
4735 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
4736 a8083063 Iustin Pop
4737 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
4738 a8083063 Iustin Pop
      if int(ei_version) != constants.EXPORT_VERSION:
4739 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
4740 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
4741 a8083063 Iustin Pop
4742 09acf207 Guido Trotter
      # Check that the new instance doesn't have fewer disks than the export
4743 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
4744 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
4745 09acf207 Guido Trotter
      if instance_disks < export_disks:
4746 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
4747 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
4748 726d7d68 Iustin Pop
                                   (instance_disks, export_disks))
4749 a8083063 Iustin Pop
4750 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
4751 09acf207 Guido Trotter
      disk_images = []
4752 09acf207 Guido Trotter
      for idx in range(export_disks):
4753 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
4754 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
4755 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
4756 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
4757 09acf207 Guido Trotter
          image = os.path.join(src_path, export_name)
4758 09acf207 Guido Trotter
          disk_images.append(image)
4759 09acf207 Guido Trotter
        else:
4760 09acf207 Guido Trotter
          disk_images.append(False)
4761 09acf207 Guido Trotter
4762 09acf207 Guido Trotter
      self.src_images = disk_images
4763 901a65c1 Iustin Pop
4764 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
4765 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
4766 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
4767 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
4768 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
4769 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count > idx:
4770 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
4771 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
4772 bc89efc3 Guido Trotter
4773 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
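    # Rough sketch of the export info consumed above (assuming the usual
    # 'export'/'instance' section names; values are hypothetical):
    #   [export]   version = 0, os = debian-etch
    #   [instance] name, disk_count, disk0_dump, ..., nic_count, nic0_mac, ...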
4774 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
4775 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
4776 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
4777 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
4778 901a65c1 Iustin Pop
4779 901a65c1 Iustin Pop
    if self.op.ip_check:
4780 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
4781 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4782 7b3a8fb5 Iustin Pop
                                   (self.check_ip, self.op.instance_name))
4783 901a65c1 Iustin Pop
4784 295728df Guido Trotter
    #### mac address generation
4785 295728df Guido Trotter
    # By generating the mac address here, both the allocator and the hooks get
4786 295728df Guido Trotter
    # the real final mac address rather than the 'auto' or 'generate' value.
4787 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
4788 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
4789 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
4790 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
4791 295728df Guido Trotter
    # creation job will fail.
4792 295728df Guido Trotter
    for nic in self.nics:
4793 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4794 295728df Guido Trotter
        nic.mac = self.cfg.GenerateMAC()
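    # GenerateMAC is expected to return an address of the form
    # "<cluster-mac-prefix>:xx:xx:xx" (e.g. "aa:00:00:36:21:ab"), so both the
    # allocator input and the hooks see a concrete, final address.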
4795 295728df Guido Trotter
4796 538475ca Iustin Pop
    #### allocator run
4797 538475ca Iustin Pop
4798 538475ca Iustin Pop
    if self.op.iallocator is not None:
4799 538475ca Iustin Pop
      self._RunAllocator()
4800 0f1a06e3 Manuel Franceschini
4801 901a65c1 Iustin Pop
    #### node related checks
4802 901a65c1 Iustin Pop
4803 901a65c1 Iustin Pop
    # check primary node
4804 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
4805 7baf741d Guido Trotter
    assert self.pnode is not None, \
4806 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
4807 7527a8a4 Iustin Pop
    if pnode.offline:
4808 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
4809 7527a8a4 Iustin Pop
                                 pnode.name)
4810 733a2b6a Iustin Pop
    if pnode.drained:
4811 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
4812 733a2b6a Iustin Pop
                                 pnode.name)
4813 7527a8a4 Iustin Pop
4814 901a65c1 Iustin Pop
    self.secondaries = []
4815 901a65c1 Iustin Pop
4816 901a65c1 Iustin Pop
    # mirror node verification
4817 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
4818 7baf741d Guido Trotter
      if self.op.snode is None:
4819 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
4820 3ecf6786 Iustin Pop
                                   " a mirror node")
4821 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
4822 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
4823 3ecf6786 Iustin Pop
                                   " the primary node.")
4824 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
4825 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
4826 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
4827 a8083063 Iustin Pop
4828 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
4829 6785674e Iustin Pop
4830 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
4831 08db7c5c Iustin Pop
                                self.disks)
4832 ed1ebc60 Guido Trotter
4833 8d75db10 Iustin Pop
    # Check lv size requirements
4834 8d75db10 Iustin Pop
    if req_size is not None:
4835 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4836 72737a7f Iustin Pop
                                         self.op.hypervisor)
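      # nodeinfo[node].payload is expected to be a dict along the lines of
      #   {"vg_size": 102400, "vg_free": 51200, "memory_total": 4096,
      #    "memory_free": 2048, "memory_dom0": 512}
      # (hypothetical numbers); only 'vg_free' matters for this check.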
4837 8d75db10 Iustin Pop
      for node in nodenames:
4838 781de953 Iustin Pop
        info = nodeinfo[node]
4839 4c4e4e1e Iustin Pop
        info.Raise("Cannot get current information from node %s" % node)
4840 070e998b Iustin Pop
        info = info.payload
4841 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
4842 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
4843 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
4844 8d75db10 Iustin Pop
                                     " node %s" % node)
4845 070e998b Iustin Pop
        if req_size > vg_free:
4846 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
4847 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
4848 070e998b Iustin Pop
                                     (node, vg_free, req_size))
4849 ed1ebc60 Guido Trotter
4850 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
4851 6785674e Iustin Pop
4852 a8083063 Iustin Pop
    # os verification
4853 781de953 Iustin Pop
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
4854 4c4e4e1e Iustin Pop
    result.Raise("OS '%s' not in supported os list for primary node %s" %
4855 4c4e4e1e Iustin Pop
                 (self.op.os_type, pnode.name), prereq=True)
4856 a8083063 Iustin Pop
4857 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
4858 a8083063 Iustin Pop
4859 49ce1563 Iustin Pop
    # memory check on primary node
4860 49ce1563 Iustin Pop
    if self.op.start:
4861 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
4862 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
4863 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
4864 338e51e8 Iustin Pop
                           self.op.hypervisor)
4865 49ce1563 Iustin Pop
4866 08896026 Iustin Pop
    self.dry_run_result = list(nodenames)
4867 08896026 Iustin Pop
4868 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4869 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
4870 a8083063 Iustin Pop

4871 a8083063 Iustin Pop
    """
4872 a8083063 Iustin Pop
    instance = self.op.instance_name
4873 a8083063 Iustin Pop
    pnode_name = self.pnode.name
4874 a8083063 Iustin Pop
4875 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
4876 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
4877 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
4878 2a6469d5 Alexander Schreiber
    else:
4879 2a6469d5 Alexander Schreiber
      network_port = None
4880 58acb49d Alexander Schreiber
4881 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
4882 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
4883 31a853d2 Iustin Pop
4884 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
4885 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
4886 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
4887 2c313123 Manuel Franceschini
    else:
4888 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
4889 2c313123 Manuel Franceschini
4890 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
4891 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
4892 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
4893 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
4894 0f1a06e3 Manuel Franceschini
4895 0f1a06e3 Manuel Franceschini
4896 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
4897 a8083063 Iustin Pop
                                  self.op.disk_template,
4898 a8083063 Iustin Pop
                                  instance, pnode_name,
4899 08db7c5c Iustin Pop
                                  self.secondaries,
4900 08db7c5c Iustin Pop
                                  self.disks,
4901 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
4902 e2a65344 Iustin Pop
                                  self.op.file_driver,
4903 e2a65344 Iustin Pop
                                  0)
4904 a8083063 Iustin Pop
4905 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
4906 a8083063 Iustin Pop
                            primary_node=pnode_name,
4907 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
4908 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
4909 4978db17 Iustin Pop
                            admin_up=False,
4910 58acb49d Alexander Schreiber
                            network_port=network_port,
4911 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
4912 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
4913 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
4914 a8083063 Iustin Pop
                            )
4915 a8083063 Iustin Pop
4916 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
4917 796cab27 Iustin Pop
    try:
4918 796cab27 Iustin Pop
      _CreateDisks(self, iobj)
4919 796cab27 Iustin Pop
    except errors.OpExecError:
4920 796cab27 Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
4921 796cab27 Iustin Pop
      try:
4922 796cab27 Iustin Pop
        _RemoveDisks(self, iobj)
4923 796cab27 Iustin Pop
      finally:
4924 796cab27 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance)
4925 796cab27 Iustin Pop
        raise
4926 a8083063 Iustin Pop
4927 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
4928 a8083063 Iustin Pop
4929 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
4930 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
4931 7baf741d Guido Trotter
    # added the instance to the config
4932 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
4933 e36e96b4 Guido Trotter
    # Unlock all the nodes
4934 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
4935 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
4936 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
4937 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
4938 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
4939 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
4940 9c8971d7 Guido Trotter
    else:
4941 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
4942 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
4943 a8083063 Iustin Pop
4944 a8083063 Iustin Pop
    if self.op.wait_for_sync:
4945 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
4946 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
4947 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
4948 a8083063 Iustin Pop
      time.sleep(15)
4949 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
4950 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
4951 a8083063 Iustin Pop
    else:
4952 a8083063 Iustin Pop
      disk_abort = False
4953 a8083063 Iustin Pop
4954 a8083063 Iustin Pop
    if disk_abort:
4955 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
4956 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
4957 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
4958 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
4959 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
4960 3ecf6786 Iustin Pop
                               " this instance")
4961 a8083063 Iustin Pop
4962 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
4963 a8083063 Iustin Pop
                (instance, pnode_name))
4964 a8083063 Iustin Pop
4965 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
4966 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
4967 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
4968 e557bae9 Guido Trotter
        result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
4969 4c4e4e1e Iustin Pop
        result.Raise("Could not add os for instance %s"
4970 4c4e4e1e Iustin Pop
                     " on node %s" % (instance, pnode_name))
4971 a8083063 Iustin Pop
4972 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
4973 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
4974 a8083063 Iustin Pop
        src_node = self.op.src_node
4975 09acf207 Guido Trotter
        src_images = self.src_images
4976 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
4977 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
4978 09acf207 Guido Trotter
                                                         src_node, src_images,
4979 6c0af70e Guido Trotter
                                                         cluster_name)
4980 4c4e4e1e Iustin Pop
        msg = import_result.fail_msg
4981 944bf548 Iustin Pop
        if msg:
4982 944bf548 Iustin Pop
          self.LogWarning("Error while importing the disk images for instance"
4983 944bf548 Iustin Pop
                          " %s on node %s: %s" % (instance, pnode_name, msg))
4984 a8083063 Iustin Pop
      else:
4985 a8083063 Iustin Pop
        # also checked in the prereq part
4986 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
4987 3ecf6786 Iustin Pop
                                     % self.op.mode)
4988 a8083063 Iustin Pop
4989 a8083063 Iustin Pop
    if self.op.start:
4990 4978db17 Iustin Pop
      iobj.admin_up = True
4991 4978db17 Iustin Pop
      self.cfg.Update(iobj)
4992 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
4993 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
4994 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
4995 4c4e4e1e Iustin Pop
      result.Raise("Could not start instance")
4996 a8083063 Iustin Pop
4997 08896026 Iustin Pop
    return list(iobj.all_nodes)
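  # For reference (illustrative only, the exact CLI syntax may differ): this
  # LU backs instance creation on the client side, roughly
  #   gnt-instance add -t drbd -s 10G -o debian-etch -n node1:node2 inst1
  # where the node specification is omitted when an iallocator is used.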
4998 08896026 Iustin Pop
4999 a8083063 Iustin Pop
5000 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
5001 a8083063 Iustin Pop
  """Connect to an instance's console.
5002 a8083063 Iustin Pop

5003 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
5004 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
5005 a8083063 Iustin Pop
  console.
5006 a8083063 Iustin Pop

5007 a8083063 Iustin Pop
  """
5008 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
5009 8659b73e Guido Trotter
  REQ_BGL = False
5010 8659b73e Guido Trotter
5011 8659b73e Guido Trotter
  def ExpandNames(self):
5012 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
5013 a8083063 Iustin Pop
5014 a8083063 Iustin Pop
  def CheckPrereq(self):
5015 a8083063 Iustin Pop
    """Check prerequisites.
5016 a8083063 Iustin Pop

5017 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
5018 a8083063 Iustin Pop

5019 a8083063 Iustin Pop
    """
5020 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5021 8659b73e Guido Trotter
    assert self.instance is not None, \
5022 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5023 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
5024 a8083063 Iustin Pop
5025 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5026 a8083063 Iustin Pop
    """Connect to the console of an instance
5027 a8083063 Iustin Pop

5028 a8083063 Iustin Pop
    """
5029 a8083063 Iustin Pop
    instance = self.instance
5030 a8083063 Iustin Pop
    node = instance.primary_node
5031 a8083063 Iustin Pop
5032 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
5033 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
5034 4c4e4e1e Iustin Pop
    node_insts.Raise("Can't get node information from %s" % node)
5035 a8083063 Iustin Pop
5036 aca13712 Iustin Pop
    if instance.name not in node_insts.payload:
5037 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
5038 a8083063 Iustin Pop
5039 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
5040 a8083063 Iustin Pop
5041 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
5042 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
5043 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
5044 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
5045 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
5046 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
5047 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
5048 b047857b Michael Hanselmann
5049 82122173 Iustin Pop
    # build ssh cmdline
5050 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
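    # Illustrative result (hypothetical names; the console command depends on
    # the hypervisor): for Xen this is roughly
    #   ssh -t ... root@node1.example.com "xm console inst1.example.com"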
5051 a8083063 Iustin Pop
5052 a8083063 Iustin Pop
5053 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
5054 a8083063 Iustin Pop
  """Replace the disks of an instance.
5055 a8083063 Iustin Pop

5056 a8083063 Iustin Pop
  """
5057 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
5058 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5059 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
5060 efd990e4 Guido Trotter
  REQ_BGL = False
5061 efd990e4 Guido Trotter
5062 7e9366f7 Iustin Pop
  def CheckArguments(self):
5063 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
5064 efd990e4 Guido Trotter
      self.op.remote_node = None
5065 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
5066 7e9366f7 Iustin Pop
      self.op.iallocator = None
5067 7e9366f7 Iustin Pop
5068 7e9366f7 Iustin Pop
    # check for valid parameter combination
5069 7e9366f7 Iustin Pop
    cnt = [self.op.remote_node, self.op.iallocator].count(None)
5070 7e9366f7 Iustin Pop
    if self.op.mode == constants.REPLACE_DISK_CHG:
5071 7e9366f7 Iustin Pop
      if cnt == 2:
5072 7e9366f7 Iustin Pop
        raise errors.OpPrereqError("When changing the secondary either an"
5073 7e9366f7 Iustin Pop
                                   " iallocator script must be used or the"
5074 7e9366f7 Iustin Pop
                                   " new node given")
5075 7e9366f7 Iustin Pop
      elif cnt == 0:
5076 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Give either the iallocator or the new"
5077 efd990e4 Guido Trotter
                                   " secondary, not both")
5078 7e9366f7 Iustin Pop
    else: # not replacing the secondary
5079 7e9366f7 Iustin Pop
      if cnt != 2:
5080 7e9366f7 Iustin Pop
        raise errors.OpPrereqError("The iallocator and new node options can"
5081 7e9366f7 Iustin Pop
                                   " be used only when changing the"
5082 7e9366f7 Iustin Pop
                                   " secondary node")
5083 7e9366f7 Iustin Pop
5084 7e9366f7 Iustin Pop
  def ExpandNames(self):
5085 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
5086 7e9366f7 Iustin Pop
5087 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
5088 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5089 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
5090 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
5091 efd990e4 Guido Trotter
      if remote_node is None:
5092 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
5093 efd990e4 Guido Trotter
                                   self.op.remote_node)
5094 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
5095 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
5096 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
5097 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
5098 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
5099 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
5100 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5101 efd990e4 Guido Trotter
    else:
5102 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
5103 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5104 efd990e4 Guido Trotter
5105 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
5106 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
5107 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
5108 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
5109 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
5110 efd990e4 Guido Trotter
      self._LockInstancesNodes()
5111 a8083063 Iustin Pop
5112 b6e82a65 Iustin Pop
  def _RunAllocator(self):
5113 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
5114 b6e82a65 Iustin Pop

5115 b6e82a65 Iustin Pop
    """
5116 72737a7f Iustin Pop
    ial = IAllocator(self,
5117 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
5118 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
5119 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
5120 b6e82a65 Iustin Pop
5121 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
5122 b6e82a65 Iustin Pop
5123 b6e82a65 Iustin Pop
    if not ial.success:
5124 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
5125 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
5126 b6e82a65 Iustin Pop
                                                           ial.info))
5127 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
5128 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5129 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
5130 b6e82a65 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
5131 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
5132 86d9d3bb Iustin Pop
    self.LogInfo("Selected new secondary for the instance: %s",
5133 86d9d3bb Iustin Pop
                 self.op.remote_node)
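    # Illustrative result (hypothetical name): in IALLOCATOR_MODE_RELOC the
    # allocator is expected to return a single node, e.g.
    #   ial.nodes -> ["node3.example.com"]
    # which then becomes the new secondary.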
5134 b6e82a65 Iustin Pop
5135 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5136 a8083063 Iustin Pop
    """Build hooks env.
5137 a8083063 Iustin Pop

5138 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
5139 a8083063 Iustin Pop

5140 a8083063 Iustin Pop
    """
5141 a8083063 Iustin Pop
    env = {
5142 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
5143 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
5144 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
5145 a8083063 Iustin Pop
      }
5146 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5147 0834c866 Iustin Pop
    nl = [
5148 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
5149 0834c866 Iustin Pop
      self.instance.primary_node,
5150 0834c866 Iustin Pop
      ]
5151 0834c866 Iustin Pop
    if self.op.remote_node is not None:
5152 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
5153 a8083063 Iustin Pop
    return env, nl, nl
5154 a8083063 Iustin Pop
5155 a8083063 Iustin Pop
  def CheckPrereq(self):
5156 a8083063 Iustin Pop
    """Check prerequisites.
5157 a8083063 Iustin Pop

5158 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
5159 a8083063 Iustin Pop

5160 a8083063 Iustin Pop
    """
5161 efd990e4 Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5162 efd990e4 Guido Trotter
    assert instance is not None, \
5163 efd990e4 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5164 a8083063 Iustin Pop
    self.instance = instance
5165 a8083063 Iustin Pop
5166 7e9366f7 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
5167 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
5168 7e9366f7 Iustin Pop
                                 " instances")
5169 a8083063 Iustin Pop
5170 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
5171 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
5172 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
5173 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
5174 a8083063 Iustin Pop
5175 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
5176 a9e0c397 Iustin Pop
5177 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
5178 de8c7666 Guido Trotter
      self._RunAllocator()
5179 b6e82a65 Iustin Pop
5180 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
5181 a9e0c397 Iustin Pop
    if remote_node is not None:
5182 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
5183 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
5184 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
5185 a9e0c397 Iustin Pop
    else:
5186 a9e0c397 Iustin Pop
      self.remote_node_info = None
5187 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
5188 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
5189 3ecf6786 Iustin Pop
                                 " the instance.")
5190 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
5191 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
5192 7e9366f7 Iustin Pop
                                 " secondary node of the instance.")
5193 7e9366f7 Iustin Pop
5194 7e9366f7 Iustin Pop
    if self.op.mode == constants.REPLACE_DISK_PRI:
5195 7e9366f7 Iustin Pop
      n1 = self.tgt_node = instance.primary_node
5196 7e9366f7 Iustin Pop
      n2 = self.oth_node = self.sec_node
5197 7e9366f7 Iustin Pop
    elif self.op.mode == constants.REPLACE_DISK_SEC:
5198 7e9366f7 Iustin Pop
      n1 = self.tgt_node = self.sec_node
5199 7e9366f7 Iustin Pop
      n2 = self.oth_node = instance.primary_node
5200 7e9366f7 Iustin Pop
    elif self.op.mode == constants.REPLACE_DISK_CHG:
5201 7e9366f7 Iustin Pop
      n1 = self.new_node = remote_node
5202 7e9366f7 Iustin Pop
      n2 = self.oth_node = instance.primary_node
5203 7e9366f7 Iustin Pop
      self.tgt_node = self.sec_node
5204 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, remote_node)
5205 7e9366f7 Iustin Pop
    else:
5206 7e9366f7 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replace mode")
5207 7e9366f7 Iustin Pop
5208 7e9366f7 Iustin Pop
    _CheckNodeOnline(self, n1)
5209 7e9366f7 Iustin Pop
    _CheckNodeOnline(self, n2)
5210 a9e0c397 Iustin Pop
5211 54155f52 Iustin Pop
    if not self.op.disks:
5212 54155f52 Iustin Pop
      self.op.disks = range(len(instance.disks))
5213 54155f52 Iustin Pop
5214 54155f52 Iustin Pop
    for disk_idx in self.op.disks:
5215 3e0cea06 Iustin Pop
      instance.FindDisk(disk_idx)
5216 a8083063 Iustin Pop
5217 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
5218 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
5219 a9e0c397 Iustin Pop

5220 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5221 e4376078 Iustin Pop

5222 e4376078 Iustin Pop
      1. for each disk to be replaced:
5223 e4376078 Iustin Pop

5224 e4376078 Iustin Pop
        1. create new LVs on the target node with unique names
5225 e4376078 Iustin Pop
        1. detach old LVs from the drbd device
5226 e4376078 Iustin Pop
        1. rename old LVs to name_replaced.<time_t>
5227 e4376078 Iustin Pop
        1. rename new LVs to old LVs
5228 e4376078 Iustin Pop
        1. attach the new LVs (with the old names now) to the drbd device
5229 e4376078 Iustin Pop

5230 e4376078 Iustin Pop
      1. wait for sync across all devices
5231 e4376078 Iustin Pop

5232 e4376078 Iustin Pop
      1. for each modified disk:
5233 e4376078 Iustin Pop

5234 e4376078 Iustin Pop
        1. remove old LVs (which have the name name_replaces.<time_t>)
5235 a9e0c397 Iustin Pop

5236 a9e0c397 Iustin Pop
    Failures are not very well handled.
5237 cff90b79 Iustin Pop

5238 a9e0c397 Iustin Pop
    """
5239 cff90b79 Iustin Pop
    steps_total = 6
5240 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5241 a9e0c397 Iustin Pop
    instance = self.instance
5242 a9e0c397 Iustin Pop
    iv_names = {}
5243 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
5244 a9e0c397 Iustin Pop
    # start of work
5245 a9e0c397 Iustin Pop
    cfg = self.cfg
5246 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
5247 cff90b79 Iustin Pop
    oth_node = self.oth_node
5248 cff90b79 Iustin Pop
5249 cff90b79 Iustin Pop
    # Step: check device activation
5250 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
5251 cff90b79 Iustin Pop
    info("checking volume groups")
5252 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
5253 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([oth_node, tgt_node])
5254 cff90b79 Iustin Pop
    if not results:
5255 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
5256 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
5257 781de953 Iustin Pop
      res = results[node]
5258 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
5259 e480923b Iustin Pop
      if my_vg not in res.payload:
5260 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5261 cff90b79 Iustin Pop
                                 (my_vg, node))
5262 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5263 54155f52 Iustin Pop
      if idx not in self.op.disks:
5264 cff90b79 Iustin Pop
        continue
5265 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
5266 54155f52 Iustin Pop
        info("checking disk/%d on %s" % (idx, node))
5267 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
5268 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
5269 4c4e4e1e Iustin Pop
        msg = result.fail_msg
5270 23829f6f Iustin Pop
        if not msg and not result.payload:
5271 23829f6f Iustin Pop
          msg = "disk not found"
5272 23829f6f Iustin Pop
        if msg:
5273 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5274 23829f6f Iustin Pop
                                   (idx, node, msg))
5275 cff90b79 Iustin Pop
5276 cff90b79 Iustin Pop
    # Step: check other node consistency
5277 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
5278 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5279 54155f52 Iustin Pop
      if idx not in self.op.disks:
5280 cff90b79 Iustin Pop
        continue
5281 54155f52 Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, oth_node))
5282 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, oth_node,
5283 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
5284 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
5285 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
5286 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
5287 cff90b79 Iustin Pop
5288 cff90b79 Iustin Pop
    # Step: create new storage
5289 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
5290 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5291 54155f52 Iustin Pop
      if idx not in self.op.disks:
5292 a9e0c397 Iustin Pop
        continue
5293 a9e0c397 Iustin Pop
      size = dev.size
5294 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
5295 54155f52 Iustin Pop
      lv_names = [".disk%d_%s" % (idx, suf)
5296 54155f52 Iustin Pop
                  for suf in ["data", "meta"]]
5297 b9bddb6b Iustin Pop
      names = _GenerateUniqueNames(self, lv_names)
5298 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5299 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
5300 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5301 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
5302 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
5303 a9e0c397 Iustin Pop
      old_lvs = dev.children
5304 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
5305 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
5306 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
5307 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
5308 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
5309 428958aa Iustin Pop
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
5310 428958aa Iustin Pop
                        _GetInstanceInfoText(instance), False)
5311 a9e0c397 Iustin Pop
5312 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
5313 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
5314 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
5315 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
5316 781de953 Iustin Pop
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
5317 4c4e4e1e Iustin Pop
      result.Raise("Can't detach drbd from local storage on node"
5318 4c4e4e1e Iustin Pop
                   " %s for device %s" % (tgt_node, dev.iv_name))
5319 cff90b79 Iustin Pop
      #dev.children = []
5320 cff90b79 Iustin Pop
      #cfg.Update(instance)
5321 a9e0c397 Iustin Pop
5322 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
5323 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
5324 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
5325 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
5326 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
5327 cff90b79 Iustin Pop
5328 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
5329 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
5330 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
5331 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
5332 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
5333 cff90b79 Iustin Pop
      rlist = []
5334 cff90b79 Iustin Pop
      for to_ren in old_lvs:
5335 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
5336 4c4e4e1e Iustin Pop
        if not result.fail_msg and result.payload:
5337 23829f6f Iustin Pop
          # device exists
5338 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
5339 cff90b79 Iustin Pop
5340 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
5341 781de953 Iustin Pop
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5342 4c4e4e1e Iustin Pop
      result.Raise("Can't rename old LVs on node %s" % tgt_node)
5343 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
5344 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
5345 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
5346 781de953 Iustin Pop
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5347 4c4e4e1e Iustin Pop
      result.Raise("Can't rename new LVs on node %s" % tgt_node)
5348 cff90b79 Iustin Pop
5349 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
5350 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
5351 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
5352 a9e0c397 Iustin Pop
5353 cff90b79 Iustin Pop
      for disk in old_lvs:
5354 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
5355 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
5356 a9e0c397 Iustin Pop
5357 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
5358 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
5359 4504c3d6 Iustin Pop
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
5360 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5361 2cc1da8b Iustin Pop
      if msg:
5362 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
5363 4c4e4e1e Iustin Pop
          msg2 = self.rpc.call_blockdev_remove(tgt_node, new_lv).fail_msg
5364 4c4e4e1e Iustin Pop
          if msg2:
5365 4c4e4e1e Iustin Pop
            warning("Can't rollback device %s: %s", dev, msg2,
5366 e1bc0878 Iustin Pop
                    hint="cleanup manually the unused logical volumes")
5367 2cc1da8b Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
5368 a9e0c397 Iustin Pop
5369 a9e0c397 Iustin Pop
      dev.children = new_lvs
5370 a9e0c397 Iustin Pop
      cfg.Update(instance)
5371 a9e0c397 Iustin Pop
5372 cff90b79 Iustin Pop
    # Step: wait for sync
5373 a9e0c397 Iustin Pop
5374 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
5375 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
5376 a9e0c397 Iustin Pop
    # return value
5377 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
5378 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
5379 a9e0c397 Iustin Pop
5380 a9e0c397 Iustin Pop
    # so check manually all the devices
5381 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5382 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
5383 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
5384 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5385 23829f6f Iustin Pop
      if not msg and not result.payload:
5386 23829f6f Iustin Pop
        msg = "disk not found"
5387 23829f6f Iustin Pop
      if msg:
5388 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
5389 23829f6f Iustin Pop
                                 (name, msg))
5390 23829f6f Iustin Pop
      if result.payload[5]:
5391 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
5392 a9e0c397 Iustin Pop
5393 cff90b79 Iustin Pop
    # Step: remove old storage
5394 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
5395 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5396 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
5397 a9e0c397 Iustin Pop
      for lv in old_lvs:
5398 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
5399 4c4e4e1e Iustin Pop
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).fail_msg
5400 e1bc0878 Iustin Pop
        if msg:
5401 e1bc0878 Iustin Pop
          warning("Can't remove old LV: %s" % msg,
5402 e1bc0878 Iustin Pop
                  hint="manually remove unused LVs")
5403 a9e0c397 Iustin Pop
          continue
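  # Naming sketch for the replacement above (volume names are illustrative):
  #   new LVs are created roughly as <unique-id>.disk0_data / .disk0_meta
  #   old LVs are renamed to <old-name>_replaced-<time_t>
  # after which the new LVs take over the old names and the old ones are
  # removed in step 6.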
5404 a9e0c397 Iustin Pop
5405 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
5406 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
5407 a9e0c397 Iustin Pop

5408 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5409 a9e0c397 Iustin Pop
      - for all disks of the instance:
5410 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
5411 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
5412 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
5413 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
5414 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
5415 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
5416 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
5417 a9e0c397 Iustin Pop
          not network enabled
5418 a9e0c397 Iustin Pop
      - wait for sync across all devices
5419 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
5420 a9e0c397 Iustin Pop

5421 a9e0c397 Iustin Pop
    Failures are not very well handled.
5422 0834c866 Iustin Pop

5423 a9e0c397 Iustin Pop
    """
5424 0834c866 Iustin Pop
    steps_total = 6
5425 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5426 a9e0c397 Iustin Pop
    instance = self.instance
5427 a9e0c397 Iustin Pop
    iv_names = {}
5428 a9e0c397 Iustin Pop
    # start of work
5429 a9e0c397 Iustin Pop
    cfg = self.cfg
5430 a9e0c397 Iustin Pop
    old_node = self.tgt_node
5431 a9e0c397 Iustin Pop
    new_node = self.new_node
5432 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
5433 a2d59d8b Iustin Pop
    nodes_ip = {
5434 a2d59d8b Iustin Pop
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
5435 a2d59d8b Iustin Pop
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
5436 a2d59d8b Iustin Pop
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
5437 a2d59d8b Iustin Pop
      }
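    # The secondary IPs collected above are used because DRBD replication
    # traffic runs over the nodes' secondary (replication) network.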
5438 0834c866 Iustin Pop
5439 0834c866 Iustin Pop
    # Step: check device activation
5440 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
5441 0834c866 Iustin Pop
    info("checking volume groups")
5442 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
5443 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
5444 0834c866 Iustin Pop
    for node in pri_node, new_node:
5445 781de953 Iustin Pop
      res = results[node]
5446 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
5447 e480923b Iustin Pop
      if my_vg not in res.payload:
5448 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5449 0834c866 Iustin Pop
                                 (my_vg, node))
5450 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5451 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5452 0834c866 Iustin Pop
        continue
5453 d418ebfb Iustin Pop
      info("checking disk/%d on %s" % (idx, pri_node))
5454 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5455 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5456 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5457 23829f6f Iustin Pop
      if not msg and not result.payload:
5458 23829f6f Iustin Pop
        msg = "disk not found"
5459 23829f6f Iustin Pop
      if msg:
5460 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5461 23829f6f Iustin Pop
                                 (idx, pri_node, msg))
5462 0834c866 Iustin Pop
5463 0834c866 Iustin Pop
    # Step: check other node consistency
5464 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
5465 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5466 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5467 0834c866 Iustin Pop
        continue
5468 d418ebfb Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, pri_node))
5469 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
5470 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
5471 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
5472 0834c866 Iustin Pop
                                 pri_node)
5473 0834c866 Iustin Pop
5474 0834c866 Iustin Pop
    # Step: create new storage
5475 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
5476 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5477 d418ebfb Iustin Pop
      info("adding new local storage on %s for disk/%d" %
5478 d418ebfb Iustin Pop
           (new_node, idx))
5479 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
5480 a9e0c397 Iustin Pop
      for new_lv in dev.children:
5481 428958aa Iustin Pop
        _CreateBlockDev(self, new_node, instance, new_lv, True,
5482 428958aa Iustin Pop
                        _GetInstanceInfoText(instance), False)
5483 a9e0c397 Iustin Pop
5484 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
5485 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
5486 a1578d63 Iustin Pop
    # error and the success paths
5487 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
5488 a1578d63 Iustin Pop
                                   instance.name)
5489 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
5490 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
5491 d418ebfb Iustin Pop
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
5492 0834c866 Iustin Pop
      size = dev.size
5493 d418ebfb Iustin Pop
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
5494 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
5495 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
5496 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
5497 a2d59d8b Iustin Pop
      # with network, for the later activation in step 4
5498 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5499 a2d59d8b Iustin Pop
      if pri_node == o_node1:
5500 a2d59d8b Iustin Pop
        p_minor = o_minor1
5501 ffa1c0dc Iustin Pop
      else:
5502 a2d59d8b Iustin Pop
        p_minor = o_minor2
5503 a2d59d8b Iustin Pop
5504 a2d59d8b Iustin Pop
      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
5505 a2d59d8b Iustin Pop
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
5506 a2d59d8b Iustin Pop
5507 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
5508 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5509 a2d59d8b Iustin Pop
                    new_net_id)
5510 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5511 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
5512 8a6c7011 Iustin Pop
                              children=dev.children,
5513 8a6c7011 Iustin Pop
                              size=dev.size)
5514 796cab27 Iustin Pop
      try:
5515 de12473a Iustin Pop
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
5516 de12473a Iustin Pop
                              _GetInstanceInfoText(instance), False)
5517 82759cb1 Iustin Pop
      except errors.GenericError:
5518 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5519 796cab27 Iustin Pop
        raise
5520 a9e0c397 Iustin Pop
5521 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5522 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
5523 d418ebfb Iustin Pop
      info("shutting down drbd for disk/%d on old node" % idx)
5524 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
5525 4c4e4e1e Iustin Pop
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).fail_msg
5526 cacfd1fd Iustin Pop
      if msg:
5527 cacfd1fd Iustin Pop
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
5528 cacfd1fd Iustin Pop
                (idx, msg),
5529 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
5530 a9e0c397 Iustin Pop
5531 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
5532 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
5533 a2d59d8b Iustin Pop
                                               instance.disks)[pri_node]
5534 642445d9 Iustin Pop
5535 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5536 a2d59d8b Iustin Pop
    if msg:
5537 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
5538 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
5539 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
5540 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
5541 642445d9 Iustin Pop
5542 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
5543 642445d9 Iustin Pop
    # the instance to point to the new secondary
5544 642445d9 Iustin Pop
    info("updating instance configuration")
5545 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
5546 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
5547 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5548 642445d9 Iustin Pop
    cfg.Update(instance)
5549 a9e0c397 Iustin Pop
5550 642445d9 Iustin Pop
    # and now perform the drbd attach
5551 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
5552 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
5553 a2d59d8b Iustin Pop
                                           instance.disks, instance.name,
5554 a2d59d8b Iustin Pop
                                           False)
5555 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
5556 4c4e4e1e Iustin Pop
      msg = to_result.fail_msg
5557 a2d59d8b Iustin Pop
      if msg:
5558 a2d59d8b Iustin Pop
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
5559 a2d59d8b Iustin Pop
                hint="please do a gnt-instance info to see the"
5560 a2d59d8b Iustin Pop
                " status of disks")
5561 a9e0c397 Iustin Pop
5562 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
5563 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
5564 a9e0c397 Iustin Pop
    # return value
5565 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
5566 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
5567 a9e0c397 Iustin Pop
5568 a9e0c397 Iustin Pop
    # so check manually all the devices
5569 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5570 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5571 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5572 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5573 23829f6f Iustin Pop
      if not msg and not result.payload:
5574 23829f6f Iustin Pop
        msg = "disk not found"
5575 23829f6f Iustin Pop
      if msg:
5576 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
5577 23829f6f Iustin Pop
                                 (idx, msg))
5578 23829f6f Iustin Pop
      if result.payload[5]:
5579 d418ebfb Iustin Pop
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
5580 a9e0c397 Iustin Pop
5581 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
5582 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5583 d418ebfb Iustin Pop
      info("remove logical volumes for disk/%d" % idx)
5584 a9e0c397 Iustin Pop
      for lv in old_lvs:
5585 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
5586 4c4e4e1e Iustin Pop
        msg = self.rpc.call_blockdev_remove(old_node, lv).fail_msg
5587 e1bc0878 Iustin Pop
        if msg:
5588 e1bc0878 Iustin Pop
          warning("Can't remove LV on old secondary: %s", msg,
5589 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
5590 a9e0c397 Iustin Pop
5591 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
5592 a9e0c397 Iustin Pop
    """Execute disk replacement.
5593 a9e0c397 Iustin Pop

5594 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
5595 a9e0c397 Iustin Pop

5596 a9e0c397 Iustin Pop
    """
5597 a9e0c397 Iustin Pop
    instance = self.instance
5598 22985314 Guido Trotter
5599 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
5600 0d68c45d Iustin Pop
    if not instance.admin_up:
5601 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, True)
5602 22985314 Guido Trotter
5603 7e9366f7 Iustin Pop
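    # REPLACE_DISK_CHG means the secondary node is being changed, so take
    # the secondary-replacement path; any other mode rebuilds the disks in
    # place on the current nodes.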
    if self.op.mode == constants.REPLACE_DISK_CHG:
      fn = self._ExecD8Secondary
    else:
      fn = self._ExecD8DiskOnly

    ret = fn(feedback_fn)

    # Deactivate the instance disks if we're replacing them on a down instance
    if not instance.admin_up:
      _SafeShutdownInstanceDisks(self, instance)

    return ret


class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)


    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)

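    # self.op.amount is in MiB; compare it against the free space in the
    # volume group ('vg_free', also in MiB) on every node holding the disk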
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo[node]
      info.Raise("Cannot get current information from node %s" % node)
      vg_free = info.payload.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      result.Raise("Grow request failed to node %s" % node)
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")


class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
      if dev_pstatus.offline:
        dev_pstatus = None
      else:
        dev_pstatus.Raise("Can't compute disk status for %s" % instance.name)
        dev_pstatus = dev_pstatus.payload
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
      if dev_sstatus.offline:
        dev_sstatus = None
      else:
        dev_sstatus.Raise("Can't compute disk status for %s" % instance.name)
        dev_sstatus = dev_sstatus.payload
    else:
      dev_sstatus = None

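    # recurse into the children, so that e.g. a DRBD device also reports
    # the status of the logical volumes it sits on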
    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": _NICListToTuple(self, instance.nics),
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result


class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def CheckArguments(self):
    if not hasattr(self.op, 'nics'):
      self.op.nics = []
    if not hasattr(self.op, 'disks'):
      self.op.disks = []
    if not hasattr(self.op, 'beparams'):
      self.op.beparams = {}
    if not hasattr(self.op, 'hvparams'):
      self.op.hvparams = {}
    self.op.force = getattr(self.op, "force", False)
    if not (self.op.nics or self.op.disks or
            self.op.hvparams or self.op.beparams):
      raise errors.OpPrereqError("No changes submitted")

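    # self.op.disks and self.op.nics are lists of (op, params) pairs, where
    # op is constants.DDM_ADD, constants.DDM_REMOVE or the integer index of
    # an existing device; e.g. (illustrative values only)
    #   [(constants.DDM_ADD, {'size': 1024, 'mode': constants.DISK_RDWR})]
    # adds a new 1024 MiB disk and [(0, {'ip': '192.0.2.10'})] changes the
    # IP address of NIC 0.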
    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index")
      if disk_op == constants.DDM_ADD:
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
        size = disk_dict.get('size', None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing")
        try:
          size = int(size)
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err))
        disk_dict['size'] = size
      else:
        # modification of disk
        if 'size' in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk")

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time")

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index")

      # nic_dict should be a dict
      nic_ip = nic_dict.get('ip', None)
      if nic_ip is not None:
        if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict['ip'] = None
        else:
          if not utils.IsValidIP(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)

      nic_bridge = nic_dict.get('bridge', None)
      nic_link = nic_dict.get('link', None)
      if nic_bridge and nic_link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time")
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
        nic_dict['bridge'] = None
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
        nic_dict['link'] = None

      if nic_op == constants.DDM_ADD:
        nic_mac = nic_dict.get('mac', None)
        if nic_mac is None:
          nic_dict['mac'] = constants.VALUE_AUTO

      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          if not utils.IsValidMac(nic_mac):
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing nic")

    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time")

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
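    # args['nics'] is built as one (ip, mac, mode, link) tuple per NIC, as
    # the NICs will look once the requested changes have been applied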
    if self.op.nics:
      args['nics'] = []
      nic_override = dict(self.op.nics)
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
      for idx, nic in enumerate(self.instance.nics):
        if idx in nic_override:
          this_nic_override = nic_override[idx]
        else:
          this_nic_override = {}
        if 'ip' in this_nic_override:
          ip = this_nic_override['ip']
        else:
          ip = nic.ip
        if 'mac' in this_nic_override:
          mac = this_nic_override['mac']
        else:
          mac = nic.mac
        if idx in self.nic_pnew:
          nicparams = self.nic_pnew[idx]
        else:
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      if constants.DDM_ADD in nic_override:
        ip = nic_override[constants.DDM_ADD].get('ip', None)
        mac = nic_override[constants.DDM_ADD]['mac']
        nicparams = self.nic_pnew[constants.DDM_ADD]
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      elif constants.DDM_REMOVE in nic_override:
        del args['nics'][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def _GetUpdatedParams(self, old_params, update_dict,
                        default_values, parameter_types):
    """Return the new params dict for the given params.

    @type old_params: dict
    @param old_params: old parameters
    @type update_dict: dict
    @param update_dict: dict containing new parameter values,
                        or constants.VALUE_DEFAULT to reset the
                        parameter to its default value
    @type default_values: dict
    @param default_values: default values for the filled parameters
    @type parameter_types: dict
    @param parameter_types: dict mapping target dict keys to types
                            in constants.ENFORCEABLE_TYPES
    @rtype: (dict, dict)
    @return: (new_parameters, filled_parameters)

    """
    params_copy = copy.deepcopy(old_params)
    for key, val in update_dict.iteritems():
      if val == constants.VALUE_DEFAULT:
        try:
          del params_copy[key]
        except KeyError:
          pass
      else:
        params_copy[key] = val
    utils.ForceDictType(params_copy, parameter_types)
    params_filled = objects.FillDict(default_values, params_copy)
    return (params_copy, params_filled)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    force = self.force = self.op.force

    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # hvparams processing
    if self.op.hvparams:
      i_hvdict, hv_new = self._GetUpdatedParams(
                             instance.hvparams, self.op.hvparams,
                             cluster.hvparams[instance.hypervisor],
                             constants.HVS_PARAMETER_TYPES)
      # local check
      hypervisor.GetHypervisor(
        instance.hypervisor).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict, be_new = self._GetUpdatedParams(
                             instance.beparams, self.op.beparams,
                             cluster.beparams[constants.PP_DEFAULT],
                             constants.BES_PARAMETER_TYPES)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}

    self.warn = []

    if constants.BE_MEMORY in self.op.beparams and not self.force:
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                         instance.hypervisor)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode,  msg))
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
        self.warn.append("Node data from primary node %s doesn't contain"
                         " free memory information" % pnode)
      elif instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                        instance_info.fail_msg)
      else:
        if instance_info.payload:
          current_mem = int(instance_info.payload['memory'])
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
6129 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
6130 cfefe007 Guido Trotter
        if miss_mem > 0:
6131 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
6132 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
6133 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
6134 cfefe007 Guido Trotter
6135 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6136 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
6137 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
6138 ea33068f Iustin Pop
            continue
6139 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
6140 070e998b Iustin Pop
          if msg:
6141 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
6142 070e998b Iustin Pop
                             (node, msg))
6143 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
6144 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
6145 070e998b Iustin Pop
                             " memory information" % node)
6146 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
6147 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
6148 647a5d80 Iustin Pop
                             " secondary node %s" % node)
6149 5bc84f33 Alexander Schreiber
6150 24991749 Iustin Pop
    # NIC processing
6151 cd098c41 Guido Trotter
    self.nic_pnew = {}
6152 cd098c41 Guido Trotter
    self.nic_pinst = {}
6153 24991749 Iustin Pop
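    # nic_pinst holds the parameters exactly as they will be stored on the
    # instance (no defaults filled in), nic_pnew the fully filled versions
    # used for validation and for the hooks environment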
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics)))
        old_nic_params = instance.nics[nic_op].nicparams
        old_nic_ip = instance.nics[nic_op].ip
      else:
        old_nic_params = {}
        old_nic_ip = None

      update_params_dict = dict([(key, nic_dict[key])
                                 for key in constants.NICS_PARAMETERS
                                 if key in nic_dict])

      if 'bridge' in nic_dict:
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']

      new_nic_params, new_filled_nic_params = \
          self._GetUpdatedParams(old_nic_params, update_params_dict,
                                 cluster.nicparams[constants.PP_DEFAULT],
                                 constants.NICS_PARAMETER_TYPES)
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
      self.nic_pinst[nic_op] = new_nic_params
      self.nic_pnew[nic_op] = new_filled_nic_params
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]

      if new_nic_mode == constants.NIC_MODE_BRIDGED:
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
        if msg:
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
          if self.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg)
      if new_nic_mode == constants.NIC_MODE_ROUTED:
        if 'ip' in nic_dict:
          nic_ip = nic_dict['ip']
        else:
          nic_ip = old_nic_ip
        if nic_ip is None:
          raise errors.OpPrereqError('Cannot set the nic ip to None'
                                     ' on a routed nic')
      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac is None:
          raise errors.OpPrereqError('Cannot set the nic mac to None')
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          # otherwise generate the mac
          nic_dict['mac'] = self.cfg.GenerateMAC()
        else:
          # or validate/reserve the current one
          if self.cfg.IsMacInUse(nic_mac):
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % nic_mac)

    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances")
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance")
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
        ins_l = ins_l[pnode]
        msg = ins_l.fail_msg
        if msg:
          raise errors.OpPrereqError("Can't contact node %s: %s" %
                                     (pnode, msg))
        if instance.name in ins_l.payload:
          raise errors.OpPrereqError("Instance is running, can't remove"
                                     " disks.")

      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks)))

    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

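    # each applied change is recorded as a (parameter path, new value) pair,
    # e.g. ("disk/0", "remove"), and the whole list is returned to the caller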
    result = []
    instance = self.instance
    cluster = self.cluster
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
          if msg:
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set by now
        mac = nic_dict['mac']
        ip = nic_dict.get('ip', None)
        nicparams = self.nic_pinst[constants.DDM_ADD]
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
                       (new_nic.mac, new_nic.ip,
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
                       )))
      else:
        for key in 'mac', 'ip':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
        if nic_op in self.nic_pnew:
          instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
        for key, val in nic_dict.iteritems():
          result.append(("nic.%s/%d" % (key, nic_op), val))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    self.cfg.Update(instance)

    return result


class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
6389 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
6390 b04285f2 Guido Trotter
    result = {}
6391 b04285f2 Guido Trotter
    for node in rpcresult:
6392 4c4e4e1e Iustin Pop
      if rpcresult[node].fail_msg:
6393 b04285f2 Guido Trotter
        result[node] = False
6394 b04285f2 Guido Trotter
      else:
6395 1b7bfbb7 Iustin Pop
        result[node] = rpcresult[node].payload
6396 b04285f2 Guido Trotter
6397 b04285f2 Guido Trotter
    return result
6398 a8083063 Iustin Pop
6399 a8083063 Iustin Pop
6400 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
6401 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
6402 a8083063 Iustin Pop

6403 a8083063 Iustin Pop
  """
6404 a8083063 Iustin Pop
  HPATH = "instance-export"
6405 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6406 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
6407 6657590e Guido Trotter
  REQ_BGL = False
6408 6657590e Guido Trotter
6409 6657590e Guido Trotter
  def ExpandNames(self):
6410 6657590e Guido Trotter
    self._ExpandAndLockInstance()
6411 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
6412 6657590e Guido Trotter
    #
6413 6657590e Guido Trotter
    # Sad but true, for now we have do lock all nodes, as we don't know where
6414 6657590e Guido Trotter
    # the previous export might be, and and in this LU we search for it and
6415 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
6416 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
6417 6657590e Guido Trotter
    #    then one to remove, after
6418 6657590e Guido Trotter
    #  - removing the removal operation altoghether
6419 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6420 6657590e Guido Trotter
6421 6657590e Guido Trotter
  def DeclareLocks(self, level):
6422 6657590e Guido Trotter
    """Last minute lock declaration."""
6423 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
6424 a8083063 Iustin Pop
6425 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6426 a8083063 Iustin Pop
    """Build hooks env.
6427 a8083063 Iustin Pop

6428 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
6429 a8083063 Iustin Pop

6430 a8083063 Iustin Pop
    """
6431 a8083063 Iustin Pop
    env = {
6432 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
6433 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
6434 a8083063 Iustin Pop
      }
6435 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6436 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
6437 a8083063 Iustin Pop
          self.op.target_node]
6438 a8083063 Iustin Pop
    return env, nl, nl
6439 a8083063 Iustin Pop
6440 a8083063 Iustin Pop
  def CheckPrereq(self):
6441 a8083063 Iustin Pop
    """Check prerequisites.
6442 a8083063 Iustin Pop

6443 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
6444 a8083063 Iustin Pop

6445 a8083063 Iustin Pop
    """
6446 6657590e Guido Trotter
    instance_name = self.op.instance_name
6447 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
6448 6657590e Guido Trotter
    assert self.instance is not None, \
6449 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
6450 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
6451 a8083063 Iustin Pop
6452 6657590e Guido Trotter
    self.dst_node = self.cfg.GetNodeInfo(
6453 6657590e Guido Trotter
      self.cfg.ExpandNodeName(self.op.target_node))
6454 a8083063 Iustin Pop
6455 268b8e42 Iustin Pop
    if self.dst_node is None:
6456 268b8e42 Iustin Pop
      # This is wrong node name, not a non-locked node
6457 268b8e42 Iustin Pop
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
6458 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
6459 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
6460 a8083063 Iustin Pop
6461 b6023d6c Manuel Franceschini
    # instance disk type verification
6462 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
6463 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
6464 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
6465 b6023d6c Manuel Franceschini
                                   " file-based disks")
6466 b6023d6c Manuel Franceschini
6467 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6468 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
6469 a8083063 Iustin Pop

6470 a8083063 Iustin Pop
    """
6471 a8083063 Iustin Pop
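    # Overall flow, as implemented below: optionally shut the instance
    # down, snapshot every disk on the primary node, restart the instance
    # if it was running, copy each snapshot to the target node, remove the
    # snapshots, finalize the export on the target node and finally remove
    # any older export of the same instance from the other nodes.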
    instance = self.instance
6472 a8083063 Iustin Pop
    dst_node = self.dst_node
6473 a8083063 Iustin Pop
    src_node = instance.primary_node
6474 a8083063 Iustin Pop
    if self.op.shutdown:
6475 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
6476 781de953 Iustin Pop
      result = self.rpc.call_instance_shutdown(src_node, instance)
6477 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance %s on"
6478 4c4e4e1e Iustin Pop
                   " node %s" % (instance.name, src_node))
6479 a8083063 Iustin Pop
6480 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
6481 a8083063 Iustin Pop
6482 a8083063 Iustin Pop
    snap_disks = []
6483 a8083063 Iustin Pop
6484 998c712c Iustin Pop
    # set the disk IDs correctly since call_instance_start needs the
6485 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
6486 998c712c Iustin Pop
    for disk in instance.disks:
6487 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
6488 998c712c Iustin Pop
6489 a8083063 Iustin Pop
    try:
6490 a97da6b7 Iustin Pop
      for idx, disk in enumerate(instance.disks):
6491 87812fd3 Iustin Pop
        # result.payload will be a snapshot of an LVM leaf of the disk we passed
6492 87812fd3 Iustin Pop
        result = self.rpc.call_blockdev_snapshot(src_node, disk)
6493 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6494 87812fd3 Iustin Pop
        if msg:
6495 af0413bb Guido Trotter
          self.LogWarning("Could not snapshot disk/%s on node %s: %s",
6496 af0413bb Guido Trotter
                          idx, src_node, msg)
6497 19d7f90a Guido Trotter
          snap_disks.append(False)
6498 19d7f90a Guido Trotter
        else:
6499 87812fd3 Iustin Pop
          disk_id = (vgname, result.payload)
6500 19d7f90a Guido Trotter
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
6501 87812fd3 Iustin Pop
                                 logical_id=disk_id, physical_id=disk_id,
6502 19d7f90a Guido Trotter
                                 iv_name=disk.iv_name)
6503 19d7f90a Guido Trotter
          snap_disks.append(new_dev)
6504 a8083063 Iustin Pop
6505 a8083063 Iustin Pop
    finally:
6506 0d68c45d Iustin Pop
      if self.op.shutdown and instance.admin_up:
6507 0eca8e0c Iustin Pop
        result = self.rpc.call_instance_start(src_node, instance, None, None)
6508 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6509 dd279568 Iustin Pop
        if msg:
6510 b9bddb6b Iustin Pop
          _ShutdownInstanceDisks(self, instance)
6511 dd279568 Iustin Pop
          raise errors.OpExecError("Could not start instance: %s" % msg)
6512 a8083063 Iustin Pop
6513 a8083063 Iustin Pop
    # TODO: check for size
6514 a8083063 Iustin Pop
6515 62c9ec92 Iustin Pop
    cluster_name = self.cfg.GetClusterName()
6516 74c47259 Iustin Pop
    for idx, dev in enumerate(snap_disks):
6517 19d7f90a Guido Trotter
      if dev:
6518 781de953 Iustin Pop
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
6519 781de953 Iustin Pop
                                               instance, cluster_name, idx)
6520 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6521 ba55d062 Iustin Pop
        if msg:
6522 af0413bb Guido Trotter
          self.LogWarning("Could not export disk/%s from node %s to"
6523 af0413bb Guido Trotter
                          " node %s: %s", idx, src_node, dst_node.name, msg)
6524 4c4e4e1e Iustin Pop
        msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
6525 e1bc0878 Iustin Pop
        if msg:
6526 a97da6b7 Iustin Pop
          self.LogWarning("Could not remove snapshot for disk/%d from node"
6527 a97da6b7 Iustin Pop
                          " %s: %s", idx, src_node, msg)
6528 a8083063 Iustin Pop
6529 781de953 Iustin Pop
    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
6530 4c4e4e1e Iustin Pop
    msg = result.fail_msg
6531 9b201a0d Iustin Pop
    if msg:
6532 9b201a0d Iustin Pop
      self.LogWarning("Could not finalize export for instance %s"
6533 9b201a0d Iustin Pop
                      " on node %s: %s", instance.name, dst_node.name, msg)
6534 a8083063 Iustin Pop
6535 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
6536 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
6537 a8083063 Iustin Pop
6538 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
6539 a8083063 Iustin Pop
    # if we proceed, the backup would be removed because OpQueryExports
6540 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
6541 35fbcd11 Iustin Pop
    iname = instance.name
6542 a8083063 Iustin Pop
    if nodelist:
6543 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
6544 a8083063 Iustin Pop
      for node in exportlist:
6545 4c4e4e1e Iustin Pop
        if exportlist[node].fail_msg:
6546 781de953 Iustin Pop
          continue
6547 35fbcd11 Iustin Pop
        if iname in exportlist[node].payload:
6548 4c4e4e1e Iustin Pop
          msg = self.rpc.call_export_remove(node, iname).fail_msg
6549 35fbcd11 Iustin Pop
          if msg:
6550 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
6551 35fbcd11 Iustin Pop
                            " on node %s: %s", iname, node, msg)
6552 5c947f38 Iustin Pop
6553 5c947f38 Iustin Pop
6554 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
6555 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
6556 9ac99fda Guido Trotter

6557 9ac99fda Guido Trotter
  """
6558 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
6559 3656b3af Guido Trotter
  REQ_BGL = False
6560 3656b3af Guido Trotter
6561 3656b3af Guido Trotter
  def ExpandNames(self):
6562 3656b3af Guido Trotter
    self.needed_locks = {}
6563 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
6564 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
6565 3656b3af Guido Trotter
    # we can remove exports also for a removed instance)
6566 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6567 9ac99fda Guido Trotter
6568 9ac99fda Guido Trotter
  def CheckPrereq(self):
6569 9ac99fda Guido Trotter
    """Check prerequisites.
6570 9ac99fda Guido Trotter
    """
6571 9ac99fda Guido Trotter
    pass
6572 9ac99fda Guido Trotter
6573 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
6574 9ac99fda Guido Trotter
    """Remove any export.
6575 9ac99fda Guido Trotter

6576 9ac99fda Guido Trotter
    """
6577 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
6578 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
6579 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
6580 9ac99fda Guido Trotter
    fqdn_warn = False
6581 9ac99fda Guido Trotter
    if not instance_name:
6582 9ac99fda Guido Trotter
      fqdn_warn = True
6583 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
6584 9ac99fda Guido Trotter
6585 1b7bfbb7 Iustin Pop
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
6586 1b7bfbb7 Iustin Pop
    exportlist = self.rpc.call_export_list(locked_nodes)
6587 9ac99fda Guido Trotter
    found = False
6588 9ac99fda Guido Trotter
    for node in exportlist:
6589 4c4e4e1e Iustin Pop
      msg = exportlist[node].fail_msg
6590 1b7bfbb7 Iustin Pop
      if msg:
6591 1b7bfbb7 Iustin Pop
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
6592 781de953 Iustin Pop
        continue
6593 1b7bfbb7 Iustin Pop
      if instance_name in exportlist[node].payload:
6594 9ac99fda Guido Trotter
        found = True
6595 781de953 Iustin Pop
        result = self.rpc.call_export_remove(node, instance_name)
6596 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6597 35fbcd11 Iustin Pop
        if msg:
6598 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
6599 35fbcd11 Iustin Pop
                        " on node %s: %s", instance_name, node, msg)
6600 9ac99fda Guido Trotter
6601 9ac99fda Guido Trotter
    if fqdn_warn and not found:
6602 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
6603 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
6604 9ac99fda Guido Trotter
                  " Domain Name.")
6605 9ac99fda Guido Trotter
6606 9ac99fda Guido Trotter
6607 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
6608 5c947f38 Iustin Pop
  """Generic tags LU.
6609 5c947f38 Iustin Pop

6610 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
6611 5c947f38 Iustin Pop

6612 5c947f38 Iustin Pop
  """
6613 5c947f38 Iustin Pop
6614 8646adce Guido Trotter
  def ExpandNames(self):
6615 8646adce Guido Trotter
    self.needed_locks = {}
6616 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
6617 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
6618 5c947f38 Iustin Pop
      if name is None:
6619 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
6620 3ecf6786 Iustin Pop
                                   (self.op.name,))
6621 5c947f38 Iustin Pop
      self.op.name = name
6622 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = name
6623 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
6624 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
6625 5c947f38 Iustin Pop
      if name is None:
6626 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
6627 3ecf6786 Iustin Pop
                                   (self.op.name,))
6628 5c947f38 Iustin Pop
      self.op.name = name
6629 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = name
6630 8646adce Guido Trotter
6631 8646adce Guido Trotter
  def CheckPrereq(self):
6632 8646adce Guido Trotter
    """Check prerequisites.
6633 8646adce Guido Trotter

6634 8646adce Guido Trotter
    """
6635 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
6636 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
6637 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
6638 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
6639 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
6640 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
6641 5c947f38 Iustin Pop
    else:
6642 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
6643 3ecf6786 Iustin Pop
                                 str(self.op.kind))
6644 5c947f38 Iustin Pop
6645 5c947f38 Iustin Pop
6646 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
6647 5c947f38 Iustin Pop
  """Returns the tags of a given object.
6648 5c947f38 Iustin Pop

6649 5c947f38 Iustin Pop
  """
6650 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
6651 8646adce Guido Trotter
  REQ_BGL = False
6652 5c947f38 Iustin Pop
6653 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6654 5c947f38 Iustin Pop
    """Returns the tag list.
6655 5c947f38 Iustin Pop

6656 5c947f38 Iustin Pop
    """
6657 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
6658 5c947f38 Iustin Pop
6659 5c947f38 Iustin Pop
6660 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
6661 73415719 Iustin Pop
  """Searches the tags for a given pattern.
6662 73415719 Iustin Pop

6663 73415719 Iustin Pop
  """
6664 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
6665 8646adce Guido Trotter
  REQ_BGL = False
6666 8646adce Guido Trotter
6667 8646adce Guido Trotter
  def ExpandNames(self):
6668 8646adce Guido Trotter
    self.needed_locks = {}
6669 73415719 Iustin Pop
6670 73415719 Iustin Pop
  def CheckPrereq(self):
6671 73415719 Iustin Pop
    """Check prerequisites.
6672 73415719 Iustin Pop

6673 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
6674 73415719 Iustin Pop

6675 73415719 Iustin Pop
    """
6676 73415719 Iustin Pop
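    # The pattern is a standard Python regular expression; for example a
    # pattern of "^web" would match every cluster, node or instance tag
    # starting with "web".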
    try:
6677 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
6678 73415719 Iustin Pop
    except re.error, err:
6679 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6680 73415719 Iustin Pop
                                 (self.op.pattern, err))
6681 73415719 Iustin Pop
6682 73415719 Iustin Pop
  def Exec(self, feedback_fn):
6683 73415719 Iustin Pop
    """Returns the (path, tag) pairs matching the pattern.
6684 73415719 Iustin Pop

6685 73415719 Iustin Pop
    """
6686 73415719 Iustin Pop
    cfg = self.cfg
6687 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
6688 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
6689 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6690 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
6691 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6692 73415719 Iustin Pop
    results = []
6693 73415719 Iustin Pop
    for path, target in tgts:
6694 73415719 Iustin Pop
      for tag in target.GetTags():
6695 73415719 Iustin Pop
        if self.re.search(tag):
6696 73415719 Iustin Pop
          results.append((path, tag))
6697 73415719 Iustin Pop
    return results
6698 73415719 Iustin Pop
6699 73415719 Iustin Pop
6700 f27302fa Iustin Pop
class LUAddTags(TagsLU):
6701 5c947f38 Iustin Pop
  """Sets one or more tags on a given object.
6702 5c947f38 Iustin Pop

6703 5c947f38 Iustin Pop
  """
6704 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6705 8646adce Guido Trotter
  REQ_BGL = False
6706 5c947f38 Iustin Pop
6707 5c947f38 Iustin Pop
  def CheckPrereq(self):
6708 5c947f38 Iustin Pop
    """Check prerequisites.
6709 5c947f38 Iustin Pop

6710 5c947f38 Iustin Pop
    This checks the type and length of the given tags.
6711 5c947f38 Iustin Pop

6712 5c947f38 Iustin Pop
    """
6713 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6714 f27302fa Iustin Pop
    for tag in self.op.tags:
6715 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6716 5c947f38 Iustin Pop
6717 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6718 5c947f38 Iustin Pop
    """Sets the tag.
6719 5c947f38 Iustin Pop

6720 5c947f38 Iustin Pop
    """
6721 5c947f38 Iustin Pop
    try:
6722 f27302fa Iustin Pop
      for tag in self.op.tags:
6723 f27302fa Iustin Pop
        self.target.AddTag(tag)
6724 5c947f38 Iustin Pop
    except errors.TagError, err:
6725 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6726 5c947f38 Iustin Pop
    try:
6727 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6728 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6729 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6730 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6731 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6732 5c947f38 Iustin Pop
6733 5c947f38 Iustin Pop
6734 f27302fa Iustin Pop
class LUDelTags(TagsLU):
6735 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
6736 5c947f38 Iustin Pop

6737 5c947f38 Iustin Pop
  """
6738 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6739 8646adce Guido Trotter
  REQ_BGL = False
6740 5c947f38 Iustin Pop
6741 5c947f38 Iustin Pop
  def CheckPrereq(self):
6742 5c947f38 Iustin Pop
    """Check prerequisites.
6743 5c947f38 Iustin Pop

6744 5c947f38 Iustin Pop
    This checks that we have the given tags.
6745 5c947f38 Iustin Pop

6746 5c947f38 Iustin Pop
    """
6747 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6748 f27302fa Iustin Pop
    for tag in self.op.tags:
6749 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6750 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
6751 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
6752 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
6753 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
6754 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
6755 f27302fa Iustin Pop
      diff_names.sort()
6756 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
6757 f27302fa Iustin Pop
                                 (",".join(diff_names)))
6758 5c947f38 Iustin Pop
6759 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6760 5c947f38 Iustin Pop
    """Remove the tag from the object.
6761 5c947f38 Iustin Pop

6762 5c947f38 Iustin Pop
    """
6763 f27302fa Iustin Pop
    for tag in self.op.tags:
6764 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
6765 5c947f38 Iustin Pop
    try:
6766 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6767 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6768 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6769 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6770 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6771 06009e27 Iustin Pop
6772 0eed6e61 Guido Trotter
6773 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
6774 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
6775 06009e27 Iustin Pop

6776 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
6777 06009e27 Iustin Pop
  time.
6778 06009e27 Iustin Pop

6779 06009e27 Iustin Pop
  """
6780 06009e27 Iustin Pop
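  # Illustration only (parameter names are taken from _OP_REQP below, the
  # values are made up): duration=10.0, on_master=True and on_nodes=[]
  # would make just the master sleep for ten seconds.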
  _OP_REQP = ["duration", "on_master", "on_nodes"]
6781 fbe9022f Guido Trotter
  REQ_BGL = False
6782 06009e27 Iustin Pop
6783 fbe9022f Guido Trotter
  def ExpandNames(self):
6784 fbe9022f Guido Trotter
    """Expand names and set required locks.
6785 06009e27 Iustin Pop

6786 fbe9022f Guido Trotter
    This expands the node list, if any.
6787 06009e27 Iustin Pop

6788 06009e27 Iustin Pop
    """
6789 fbe9022f Guido Trotter
    self.needed_locks = {}
6790 06009e27 Iustin Pop
    if self.op.on_nodes:
6791 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
6792 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
6793 fbe9022f Guido Trotter
      # more information.
6794 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
6795 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
6796 fbe9022f Guido Trotter
6797 fbe9022f Guido Trotter
  def CheckPrereq(self):
6798 fbe9022f Guido Trotter
    """Check prerequisites.
6799 fbe9022f Guido Trotter

6800 fbe9022f Guido Trotter
    """
6801 06009e27 Iustin Pop
6802 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
6803 06009e27 Iustin Pop
    """Do the actual sleep.
6804 06009e27 Iustin Pop

6805 06009e27 Iustin Pop
    """
6806 06009e27 Iustin Pop
    if self.op.on_master:
6807 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
6808 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
6809 06009e27 Iustin Pop
    if self.op.on_nodes:
6810 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
6811 06009e27 Iustin Pop
      for node, node_result in result.items():
6812 4c4e4e1e Iustin Pop
        node_result.Raise("Failure during rpc call to node %s" % node)
6813 d61df03e Iustin Pop
6814 d61df03e Iustin Pop
6815 d1c2dd75 Iustin Pop
class IAllocator(object):
6816 d1c2dd75 Iustin Pop
  """IAllocator framework.
6817 d61df03e Iustin Pop

6818 d1c2dd75 Iustin Pop
  An IAllocator instance has four sets of attributes:
6819 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
6820 d1c2dd75 Iustin Pop
    - input data (all members of the mode's keyset, _ALLO_KEYS or
6821 d1c2dd75 Iustin Pop
      _RELO_KEYS, are required)
    - four buffer attributes (in_text, out_text, in_data, out_data), that
6822 d1c2dd75 Iustin Pop
      represent the input (to the external script) in text and data
6823 d1c2dd75 Iustin Pop
      structure format, and the output from it, again in two formats
6824 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
6825 d1c2dd75 Iustin Pop
      easy usage
6826 d61df03e Iustin Pop

6827 d61df03e Iustin Pop
  """
6828 29859cb7 Iustin Pop
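  # A rough usage sketch, mirroring LUTestAllocator.Exec further below (it
  # is run from inside a LogicalUnit, hence the "self" argument; all other
  # values here are just placeholders):
  #
  #   ial = IAllocator(self, mode=constants.IALLOCATOR_MODE_ALLOC,
  #                    name="inst1.example.com", mem_size=128, disks=[],
  #                    disk_template="plain", os="dummy-os", tags=[],
  #                    nics=[], vcpus=1, hypervisor=None)
  #   ial.Run("my-allocator")
  #   if ial.success:
  #     target_nodes = ial.nodes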
  _ALLO_KEYS = [
6829 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
6830 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
6831 d1c2dd75 Iustin Pop
    ]
6832 29859cb7 Iustin Pop
  _RELO_KEYS = [
6833 29859cb7 Iustin Pop
    "relocate_from",
6834 29859cb7 Iustin Pop
    ]
6835 d1c2dd75 Iustin Pop
6836 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
6837 72737a7f Iustin Pop
    self.lu = lu
6838 d1c2dd75 Iustin Pop
    # init buffer variables
6839 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
6840 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
6841 29859cb7 Iustin Pop
    self.mode = mode
6842 29859cb7 Iustin Pop
    self.name = name
6843 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
6844 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
6845 a0add446 Iustin Pop
    self.hypervisor = None
6846 29859cb7 Iustin Pop
    self.relocate_from = None
6847 27579978 Iustin Pop
    # computed fields
6848 27579978 Iustin Pop
    self.required_nodes = None
6849 d1c2dd75 Iustin Pop
    # init result fields
6850 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
6851 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6852 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
6853 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6854 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
6855 29859cb7 Iustin Pop
    else:
6856 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
6857 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
6858 d1c2dd75 Iustin Pop
    for key in kwargs:
6859 29859cb7 Iustin Pop
      if key not in keyset:
6860 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
6861 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
6862 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
6863 29859cb7 Iustin Pop
    for key in keyset:
6864 d1c2dd75 Iustin Pop
      if key not in kwargs:
6865 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
6866 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
6867 d1c2dd75 Iustin Pop
    self._BuildInputData()
6868 d1c2dd75 Iustin Pop
6869 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
6870 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
6871 d1c2dd75 Iustin Pop

6872 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
6873 d1c2dd75 Iustin Pop

6874 d1c2dd75 Iustin Pop
    """
6875 72737a7f Iustin Pop
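    # Rough shape of the structure assembled here (keys as assigned below);
    # the "request" sub-dict is added later by _AddNewInstance or
    # _AddRelocateInstance:
    #
    #   {
    #     "version": constants.IALLOCATOR_VERSION,
    #     "cluster_name": "...",
    #     "cluster_tags": [...],
    #     "enabled_hypervisors": [...],
    #     "nodes": {node_name: {...}, ...},
    #     "instances": {instance_name: {...}, ...},
    #   }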
    cfg = self.lu.cfg
6876 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
6877 d1c2dd75 Iustin Pop
    # cluster data
6878 d1c2dd75 Iustin Pop
    data = {
6879 77031881 Iustin Pop
      "version": constants.IALLOCATOR_VERSION,
6880 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
6881 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
6882 1325da74 Iustin Pop
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
6883 d1c2dd75 Iustin Pop
      # we don't have job IDs
6884 d61df03e Iustin Pop
      }
6885 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
6886 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
6887 6286519f Iustin Pop
6888 d1c2dd75 Iustin Pop
    # node data
6889 d1c2dd75 Iustin Pop
    node_results = {}
6890 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
6891 8cc7e742 Guido Trotter
6892 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6893 a0add446 Iustin Pop
      hypervisor_name = self.hypervisor
6894 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6895 a0add446 Iustin Pop
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
6896 8cc7e742 Guido Trotter
6897 72737a7f Iustin Pop
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
6898 a0add446 Iustin Pop
                                           hypervisor_name)
6899 18640d69 Guido Trotter
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
6900 18640d69 Guido Trotter
                       cluster_info.enabled_hypervisors)
6901 1325da74 Iustin Pop
    for nname, nresult in node_data.items():
6902 1325da74 Iustin Pop
      # first fill in static (config-based) values
6903 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
6904 d1c2dd75 Iustin Pop
      pnr = {
6905 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
6906 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
6907 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
6908 fc0fe88c Iustin Pop
        "offline": ninfo.offline,
6909 0b2454b9 Iustin Pop
        "drained": ninfo.drained,
6910 1325da74 Iustin Pop
        "master_candidate": ninfo.master_candidate,
6911 d1c2dd75 Iustin Pop
        }
6912 1325da74 Iustin Pop
6913 1325da74 Iustin Pop
      if not ninfo.offline:
6914 4c4e4e1e Iustin Pop
        nresult.Raise("Can't get data for node %s" % nname)
6915 4c4e4e1e Iustin Pop
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
6916 4c4e4e1e Iustin Pop
                                nname)
6917 070e998b Iustin Pop
        remote_info = nresult.payload
6918 1325da74 Iustin Pop
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
6919 1325da74 Iustin Pop
                     'vg_size', 'vg_free', 'cpu_total']:
6920 1325da74 Iustin Pop
          if attr not in remote_info:
6921 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' didn't return attribute"
6922 1325da74 Iustin Pop
                                     " '%s'" % (nname, attr))
6923 070e998b Iustin Pop
          if not isinstance(remote_info[attr], int):
6924 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' returned invalid value"
6925 070e998b Iustin Pop
                                     " for '%s': %s" %
6926 070e998b Iustin Pop
                                     (nname, attr, remote_info[attr]))
6927 1325da74 Iustin Pop
        # compute memory used by primary instances
6928 1325da74 Iustin Pop
        i_p_mem = i_p_up_mem = 0
6929 1325da74 Iustin Pop
        for iinfo, beinfo in i_list:
6930 1325da74 Iustin Pop
          if iinfo.primary_node == nname:
6931 1325da74 Iustin Pop
            i_p_mem += beinfo[constants.BE_MEMORY]
6932 2fa74ef4 Iustin Pop
            if iinfo.name not in node_iinfo[nname].payload:
6933 1325da74 Iustin Pop
              i_used_mem = 0
6934 1325da74 Iustin Pop
            else:
6935 2fa74ef4 Iustin Pop
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
6936 1325da74 Iustin Pop
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
6937 1325da74 Iustin Pop
            remote_info['memory_free'] -= max(0, i_mem_diff)
6938 1325da74 Iustin Pop
6939 1325da74 Iustin Pop
            if iinfo.admin_up:
6940 1325da74 Iustin Pop
              i_p_up_mem += beinfo[constants.BE_MEMORY]
6941 1325da74 Iustin Pop
6942 1325da74 Iustin Pop
        # compute memory used by instances
6943 1325da74 Iustin Pop
        pnr_dyn = {
6944 1325da74 Iustin Pop
          "total_memory": remote_info['memory_total'],
6945 1325da74 Iustin Pop
          "reserved_memory": remote_info['memory_dom0'],
6946 1325da74 Iustin Pop
          "free_memory": remote_info['memory_free'],
6947 1325da74 Iustin Pop
          "total_disk": remote_info['vg_size'],
6948 1325da74 Iustin Pop
          "free_disk": remote_info['vg_free'],
6949 1325da74 Iustin Pop
          "total_cpus": remote_info['cpu_total'],
6950 1325da74 Iustin Pop
          "i_pri_memory": i_p_mem,
6951 1325da74 Iustin Pop
          "i_pri_up_memory": i_p_up_mem,
6952 1325da74 Iustin Pop
          }
6953 1325da74 Iustin Pop
        pnr.update(pnr_dyn)
6954 1325da74 Iustin Pop
6955 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
6956 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
6957 d1c2dd75 Iustin Pop
6958 d1c2dd75 Iustin Pop
    # instance data
6959 d1c2dd75 Iustin Pop
    instance_data = {}
6960 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
6961 a9fe7e8f Guido Trotter
      nic_data = []
6962 a9fe7e8f Guido Trotter
      for nic in iinfo.nics:
6963 a9fe7e8f Guido Trotter
        filled_params = objects.FillDict(
6964 a9fe7e8f Guido Trotter
            cluster_info.nicparams[constants.PP_DEFAULT],
6965 a9fe7e8f Guido Trotter
            nic.nicparams)
6966 a9fe7e8f Guido Trotter
        nic_dict = {"mac": nic.mac,
6967 a9fe7e8f Guido Trotter
                    "ip": nic.ip,
6968 a9fe7e8f Guido Trotter
                    "mode": filled_params[constants.NIC_MODE],
6969 a9fe7e8f Guido Trotter
                    "link": filled_params[constants.NIC_LINK],
6970 a9fe7e8f Guido Trotter
                   }
6971 a9fe7e8f Guido Trotter
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
6972 a9fe7e8f Guido Trotter
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
6973 a9fe7e8f Guido Trotter
        nic_data.append(nic_dict)
6974 d1c2dd75 Iustin Pop
      pir = {
6975 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
6976 1325da74 Iustin Pop
        "admin_up": iinfo.admin_up,
6977 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
6978 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
6979 d1c2dd75 Iustin Pop
        "os": iinfo.os,
6980 1325da74 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
6981 d1c2dd75 Iustin Pop
        "nics": nic_data,
6982 1325da74 Iustin Pop
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
6983 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
6984 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
6985 d1c2dd75 Iustin Pop
        }
6986 88ae4f85 Iustin Pop
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
6987 88ae4f85 Iustin Pop
                                                 pir["disks"])
6988 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
6989 d61df03e Iustin Pop
6990 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
6991 d61df03e Iustin Pop
6992 d1c2dd75 Iustin Pop
    self.in_data = data
6993 d61df03e Iustin Pop
6994 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
6995 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
6996 d61df03e Iustin Pop

6997 d1c2dd75 Iustin Pop
    This, in combination with _ComputeClusterData, will create the
6998 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
6999 d61df03e Iustin Pop

7000 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
7001 d1c2dd75 Iustin Pop
    done.
7002 d61df03e Iustin Pop

7003 d1c2dd75 Iustin Pop
    """
7004 d1c2dd75 Iustin Pop
    data = self.in_data
7005 d1c2dd75 Iustin Pop
7006 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
7007 d1c2dd75 Iustin Pop
7008 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
7009 27579978 Iustin Pop
      self.required_nodes = 2
7010 27579978 Iustin Pop
    else:
7011 27579978 Iustin Pop
      self.required_nodes = 1
7012 d1c2dd75 Iustin Pop
    request = {
7013 d1c2dd75 Iustin Pop
      "type": "allocate",
7014 d1c2dd75 Iustin Pop
      "name": self.name,
7015 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
7016 d1c2dd75 Iustin Pop
      "tags": self.tags,
7017 d1c2dd75 Iustin Pop
      "os": self.os,
7018 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
7019 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
7020 d1c2dd75 Iustin Pop
      "disks": self.disks,
7021 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
7022 d1c2dd75 Iustin Pop
      "nics": self.nics,
7023 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
7024 d1c2dd75 Iustin Pop
      }
7025 d1c2dd75 Iustin Pop
    data["request"] = request
7026 298fe380 Iustin Pop
7027 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
7028 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
7029 298fe380 Iustin Pop

7030 d1c2dd75 Iustin Pop
    This, in combination with _ComputeClusterData, will create the
7031 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
7032 d61df03e Iustin Pop

7033 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
7034 d1c2dd75 Iustin Pop
    done.
7035 d61df03e Iustin Pop

7036 d1c2dd75 Iustin Pop
    """
7037 72737a7f Iustin Pop
    instance = self.lu.cfg.GetInstanceInfo(self.name)
7038 27579978 Iustin Pop
    if instance is None:
7039 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
7040 27579978 Iustin Pop
                                   " IAllocator" % self.name)
7041 27579978 Iustin Pop
7042 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
7043 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
7044 27579978 Iustin Pop
7045 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
7046 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node")
7047 2a139bb0 Iustin Pop
7048 27579978 Iustin Pop
    self.required_nodes = 1
7049 dafc7302 Guido Trotter
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
7050 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
7051 27579978 Iustin Pop
7052 d1c2dd75 Iustin Pop
    request = {
7053 2a139bb0 Iustin Pop
      "type": "relocate",
7054 d1c2dd75 Iustin Pop
      "name": self.name,
7055 27579978 Iustin Pop
      "disk_space_total": disk_space,
7056 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
7057 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
7058 d1c2dd75 Iustin Pop
      }
7059 27579978 Iustin Pop
    self.in_data["request"] = request
7060 d61df03e Iustin Pop
7061 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
7062 d1c2dd75 Iustin Pop
    """Build input data structures.
7063 d61df03e Iustin Pop

7064 d1c2dd75 Iustin Pop
    """
7065 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
7066 d61df03e Iustin Pop
7067 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
7068 d1c2dd75 Iustin Pop
      self._AddNewInstance()
7069 d1c2dd75 Iustin Pop
    else:
7070 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
7071 d61df03e Iustin Pop
7072 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
7073 d61df03e Iustin Pop
7074 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
7075 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
7076 298fe380 Iustin Pop

7077 d1c2dd75 Iustin Pop
    """
7078 72737a7f Iustin Pop
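    # The allocator script runs on the master node through the iallocator
    # runner RPC; the raw output ends up in self.out_text and, if validate
    # is True, is parsed and checked by _ValidateResult below.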
    if call_fn is None:
7079 72737a7f Iustin Pop
      call_fn = self.lu.rpc.call_iallocator_runner
7080 d1c2dd75 Iustin Pop
    data = self.in_text
7081 298fe380 Iustin Pop
7082 72737a7f Iustin Pop
    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
7083 4c4e4e1e Iustin Pop
    result.Raise("Failure while running the iallocator script")
7084 8d528b7c Iustin Pop
7085 87f5c298 Iustin Pop
    self.out_text = result.payload
7086 d1c2dd75 Iustin Pop
    if validate:
7087 d1c2dd75 Iustin Pop
      self._ValidateResult()
7088 298fe380 Iustin Pop
7089 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
7090 d1c2dd75 Iustin Pop
    """Process the allocator results.
7091 538475ca Iustin Pop

7092 d1c2dd75 Iustin Pop
    This will process and, if successful, save the result in
7093 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
7094 538475ca Iustin Pop

7095 d1c2dd75 Iustin Pop
    """
7096 d1c2dd75 Iustin Pop
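    # The script output is expected to be a JSON object along these lines
    # (a sketch only, the values are placeholders):
    #
    #   {"success": true, "info": "allocation successful",
    #    "nodes": ["node1.example.com", "node2.example.com"]}
    #
    # and the checks below enforce this general shape.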
    try:
7097 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
7098 d1c2dd75 Iustin Pop
    except Exception, err:
7099 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
7100 d1c2dd75 Iustin Pop
7101 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
7102 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
7103 538475ca Iustin Pop
7104 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
7105 d1c2dd75 Iustin Pop
      if key not in rdict:
7106 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
7107 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
7108 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
7109 538475ca Iustin Pop
7110 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
7111 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
7112 d1c2dd75 Iustin Pop
                               " is not a list")
7113 d1c2dd75 Iustin Pop
    self.out_data = rdict
7114 538475ca Iustin Pop
7115 538475ca Iustin Pop
7116 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
7117 d61df03e Iustin Pop
  """Run allocator tests.
7118 d61df03e Iustin Pop

7119 d61df03e Iustin Pop
  This LU runs the allocator tests.
7120 d61df03e Iustin Pop

7121 d61df03e Iustin Pop
  """
7122 d61df03e Iustin Pop
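  # As implemented in Exec below: with direction IALLOCATOR_DIR_IN the LU
  # only returns the generated allocator input text, while with
  # IALLOCATOR_DIR_OUT it runs the named allocator script and returns its
  # raw, unvalidated output.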
  _OP_REQP = ["direction", "mode", "name"]
7123 d61df03e Iustin Pop
7124 d61df03e Iustin Pop
  def CheckPrereq(self):
7125 d61df03e Iustin Pop
    """Check prerequisites.
7126 d61df03e Iustin Pop

7127 d61df03e Iustin Pop
    This checks the opcode parameters depending on the direction and mode
    of the test.
7128 d61df03e Iustin Pop

7129 d61df03e Iustin Pop
    """
7130 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
7131 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
7132 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
7133 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
7134 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
7135 d61df03e Iustin Pop
                                     attr)
7136 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
7137 d61df03e Iustin Pop
      if iname is not None:
7138 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
7139 d61df03e Iustin Pop
                                   iname)
7140 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
7141 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'")
7142 d61df03e Iustin Pop
      for row in self.op.nics:
7143 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
7144 d61df03e Iustin Pop
            "mac" not in row or
7145 d61df03e Iustin Pop
            "ip" not in row or
7146 d61df03e Iustin Pop
            "bridge" not in row):
7147 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
7148 d61df03e Iustin Pop
                                     " 'nics' parameter")
7149 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
7150 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'")
7151 d61df03e Iustin Pop
      for row in self.op.disks:
7152 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
7153 d61df03e Iustin Pop
            "size" not in row or
7154 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
7155 d61df03e Iustin Pop
            "mode" not in row or
7156 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
7157 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
7158 d61df03e Iustin Pop
                                     " 'disks' parameter")
7159 8901997e Iustin Pop
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
7160 8cc7e742 Guido Trotter
        self.op.hypervisor = self.cfg.GetHypervisorType()
7161 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
7162 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
7163 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
7164 d61df03e Iustin Pop
      fname = self.cfg.ExpandInstanceName(self.op.name)
7165 d61df03e Iustin Pop
      if fname is None:
7166 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
7167 d61df03e Iustin Pop
                                   self.op.name)
7168 d61df03e Iustin Pop
      self.op.name = fname
7169 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
7170 d61df03e Iustin Pop
    else:
7171 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
7172 d61df03e Iustin Pop
                                 self.op.mode)
7173 d61df03e Iustin Pop
7174 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
7175 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
7176 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing allocator name")
7177 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
7178 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
7179 d61df03e Iustin Pop
                                 self.op.direction)
7180 d61df03e Iustin Pop
7181 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
7182 d61df03e Iustin Pop
    """Run the allocator test.
7183 d61df03e Iustin Pop

7184 d61df03e Iustin Pop
    """
7185 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
7186 72737a7f Iustin Pop
      ial = IAllocator(self,
7187 29859cb7 Iustin Pop
                       mode=self.op.mode,
7188 29859cb7 Iustin Pop
                       name=self.op.name,
7189 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
7190 29859cb7 Iustin Pop
                       disks=self.op.disks,
7191 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
7192 29859cb7 Iustin Pop
                       os=self.op.os,
7193 29859cb7 Iustin Pop
                       tags=self.op.tags,
7194 29859cb7 Iustin Pop
                       nics=self.op.nics,
7195 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
7196 8cc7e742 Guido Trotter
                       hypervisor=self.op.hypervisor,
7197 29859cb7 Iustin Pop
                       )
7198 29859cb7 Iustin Pop
    else:
7199 72737a7f Iustin Pop
      ial = IAllocator(self,
7200 29859cb7 Iustin Pop
                       mode=self.op.mode,
7201 29859cb7 Iustin Pop
                       name=self.op.name,
7202 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
7203 29859cb7 Iustin Pop
                       )
7204 d61df03e Iustin Pop
7205 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
7206 d1c2dd75 Iustin Pop
      result = ial.in_text
7207 298fe380 Iustin Pop
    else:
7208 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
7209 d1c2dd75 Iustin Pop
      result = ial.out_text
7210 298fe380 Iustin Pop
    return result