#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import time
import re
import platform
import logging
import copy

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by the opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo
    self.LogStep = processor.LogStep
    # support for dry-run
    self.dry_run_result = None

    # Tasklets
    self.tasklets = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity of the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer need to worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """
  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this
    prefix will be added by the hooks runner. Also note that additional
    keys will be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes to return, use an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged, but any LU can redefine it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None
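

# The following class is an illustrative sketch only: it is not part of the
# original module and is not referenced by the opcode dispatcher. Under the
# rules documented in LogicalUnit above, it shows how a minimal concurrent LU
# wires ExpandNames/CheckPrereq/Exec together. The class name and behaviour
# are hypothetical.
class _LUExampleNoop(NoHooksLU):
  """Example no-op LU (hypothetical, for illustration only)."""
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # a concurrent LU must set needed_locks explicitly; an empty dict
    # (not None) means "no locks needed"
    self.needed_locks = {}

  def CheckPrereq(self):
    # nothing to verify for this example
    pass

  def Exec(self, feedback_fn):
    # the return value is handed back to the caller of the opcode
    feedback_fn("Example LU executed; doing nothing")
    return True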


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError
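

# Illustrative sketch only (hypothetical, not used anywhere in this module):
# a tasklet that verifies a node is online in CheckPrereq and merely reports
# in Exec, showing how tasklets reuse the owning LU's cfg/rpc shortcuts and
# would be listed in the LU's self.tasklets from ExpandNames.
class _ExampleNodeOnlineTasklet(Tasklet):
  """Example tasklet (hypothetical, for illustration only)."""
  def __init__(self, lu, node_name):
    Tasklet.__init__(self, lu)
    self.node_name = node_name

  def CheckPrereq(self):
    # _CheckNodeOnline is defined further down in this module
    _CheckNodeOnline(self.lu, self.node_name)

  def Exec(self, feedback_fn):
    feedback_fn("Node %s verified online" % self.node_name)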


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))
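

# Usage sketch (illustrative, with hypothetical field names): an LU that
# validates its "output_fields" opcode parameter against one static and one
# dynamic field set could call the helper above like this:
#
#   _CheckOutputFields(static=utils.FieldSet("name", "pinst_cnt"),
#                      dynamic=utils.FieldSet("dfree", "dtotal"),
#                      selected=self.op.output_fields)
#
# Any selected field outside the union of the two sets raises
# errors.OpPrereqError listing the unknown names.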


def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
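

# Illustration only (hypothetical one-NIC, one-disk instance): the function
# above would produce keys along these lines, to which the hooks runner later
# adds its own "GANETI_" prefix:
#
#   OP_TARGET=inst1.example.com      INSTANCE_PRIMARY=node1.example.com
#   INSTANCE_STATUS=up               INSTANCE_DISK_TEMPLATE=drbd
#   INSTANCE_NIC_COUNT=1             INSTANCE_NIC0_MODE=bridged
#   INSTANCE_DISK_COUNT=1            INSTANCE_DISK0_SIZE=10240
#   INSTANCE_BE_memory=512           INSTANCE_HV_kernel_path=...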


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max with one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


def _CheckNicsBridgesExist(lu, target_nics, target_node,
                               profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
                for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os: L{objects.OS}
  @param os: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os.supported_variants:
    return
  try:
    variant = name.split("+", 1)[1]
  except IndexError:
    raise errors.OpPrereqError("OS name must include a variant")

  if variant not in os.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant")
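

# Usage sketch (illustrative names only): for an OS object whose
# supported_variants list is ["lenny", "squeeze"], a user-supplied name of
# "debootstrap+lenny" passes the check above, while "debootstrap" (missing
# variant) or "debootstrap+etch" (unknown variant) raise errors.OpPrereqError.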


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty
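

# Usage sketch (hypothetical call site): from an LU's CheckPrereq one could
# collect the indices of the instance disks that are faulty on its primary
# node with:
#
#   faulty = _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
#                                     instance.primary_node, prereq=True)
#
# The returned list holds the indices whose local DRBD disk status is
# constants.LDS_FAULTY.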


class LUPostInitCluster(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def CheckPrereq(self):
    """No prerequisites to check.

    """
    return True

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUDestroyCluster(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except:
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
887 a8083063 Iustin Pop
  """Verifies the cluster status.
888 a8083063 Iustin Pop

889 a8083063 Iustin Pop
  """
890 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
891 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
892 a0c9776a Iustin Pop
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
893 d4b9d97f Guido Trotter
  REQ_BGL = False
894 d4b9d97f Guido Trotter
895 7c874ee1 Iustin Pop
  TCLUSTER = "cluster"
896 7c874ee1 Iustin Pop
  TNODE = "node"
897 7c874ee1 Iustin Pop
  TINSTANCE = "instance"
898 7c874ee1 Iustin Pop
899 7c874ee1 Iustin Pop
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
900 7c874ee1 Iustin Pop
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
901 7c874ee1 Iustin Pop
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
902 7c874ee1 Iustin Pop
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
903 7c874ee1 Iustin Pop
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
904 7c874ee1 Iustin Pop
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
905 7c874ee1 Iustin Pop
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
906 7c874ee1 Iustin Pop
  ENODEDRBD = (TNODE, "ENODEDRBD")
907 7c874ee1 Iustin Pop
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
908 7c874ee1 Iustin Pop
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
909 7c874ee1 Iustin Pop
  ENODEHV = (TNODE, "ENODEHV")
910 7c874ee1 Iustin Pop
  ENODELVM = (TNODE, "ENODELVM")
911 7c874ee1 Iustin Pop
  ENODEN1 = (TNODE, "ENODEN1")
912 7c874ee1 Iustin Pop
  ENODENET = (TNODE, "ENODENET")
913 7c874ee1 Iustin Pop
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
914 7c874ee1 Iustin Pop
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
915 7c874ee1 Iustin Pop
  ENODERPC = (TNODE, "ENODERPC")
916 7c874ee1 Iustin Pop
  ENODESSH = (TNODE, "ENODESSH")
917 7c874ee1 Iustin Pop
  ENODEVERSION = (TNODE, "ENODEVERSION")
918 7c874ee1 Iustin Pop
919 a0c9776a Iustin Pop
  ETYPE_FIELD = "code"
920 a0c9776a Iustin Pop
  ETYPE_ERROR = "ERROR"
921 a0c9776a Iustin Pop
  ETYPE_WARNING = "WARNING"
922 a0c9776a Iustin Pop
923 d4b9d97f Guido Trotter
  def ExpandNames(self):
924 d4b9d97f Guido Trotter
    self.needed_locks = {
925 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
926 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
927 d4b9d97f Guido Trotter
    }
928 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
929 a8083063 Iustin Pop
930 7c874ee1 Iustin Pop
  def _Error(self, ecode, item, msg, *args, **kwargs):
931 7c874ee1 Iustin Pop
    """Format an error message.
932 7c874ee1 Iustin Pop

933 7c874ee1 Iustin Pop
    Based on the opcode's error_codes parameter, either format a
934 7c874ee1 Iustin Pop
    parseable error code, or a simpler error string.
935 7c874ee1 Iustin Pop

936 7c874ee1 Iustin Pop
    This must be called only from Exec and functions called from Exec.
937 7c874ee1 Iustin Pop

938 7c874ee1 Iustin Pop
    """
939 a0c9776a Iustin Pop
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
940 7c874ee1 Iustin Pop
    itype, etxt = ecode
941 7c874ee1 Iustin Pop
    # first complete the msg
942 7c874ee1 Iustin Pop
    if args:
943 7c874ee1 Iustin Pop
      msg = msg % args
944 7c874ee1 Iustin Pop
    # then format the whole message
945 7c874ee1 Iustin Pop
    if self.op.error_codes:
946 7c874ee1 Iustin Pop
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
947 7c874ee1 Iustin Pop
    else:
948 7c874ee1 Iustin Pop
      if item:
949 7c874ee1 Iustin Pop
        item = " " + item
950 7c874ee1 Iustin Pop
      else:
951 7c874ee1 Iustin Pop
        item = ""
952 7c874ee1 Iustin Pop
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
953 7c874ee1 Iustin Pop
    # and finally report it via the feedback_fn
954 7c874ee1 Iustin Pop
    self._feedback_fn("  - %s" % msg)
955 7c874ee1 Iustin Pop
956 a0c9776a Iustin Pop
  def _ErrorIf(self, cond, *args, **kwargs):
957 a0c9776a Iustin Pop
    """Log an error message if the passed condition is True.
958 a0c9776a Iustin Pop

959 a0c9776a Iustin Pop
    """
960 a0c9776a Iustin Pop
    cond = bool(cond) or self.op.debug_simulate_errors
961 a0c9776a Iustin Pop
    if cond:
962 a0c9776a Iustin Pop
      self._Error(*args, **kwargs)
963 a0c9776a Iustin Pop
    # do not mark the operation as failed for WARN cases only
964 a0c9776a Iustin Pop
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
965 a0c9776a Iustin Pop
      self.bad = self.bad or cond
966 a0c9776a Iustin Pop
967 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
968 7c874ee1 Iustin Pop
                  node_result, master_files, drbd_map, vg_name):
969 a8083063 Iustin Pop
    """Run multiple tests against a node.
970 a8083063 Iustin Pop

971 112f18a5 Iustin Pop
    Test list:
972 e4376078 Iustin Pop

973 a8083063 Iustin Pop
      - compares ganeti version
974 5bbd3f7f Michael Hanselmann
      - checks vg existence and size > 20G
975 a8083063 Iustin Pop
      - checks config file checksum
976 a8083063 Iustin Pop
      - checks ssh to other nodes
977 a8083063 Iustin Pop

978 112f18a5 Iustin Pop
    @type nodeinfo: L{objects.Node}
979 112f18a5 Iustin Pop
    @param nodeinfo: the node to check
980 e4376078 Iustin Pop
    @param file_list: required list of files
981 e4376078 Iustin Pop
    @param local_cksum: dictionary of local files and their checksums
982 e4376078 Iustin Pop
    @param node_result: the results from the node
983 112f18a5 Iustin Pop
    @param master_files: list of files that only masters should have
984 6d2e83d5 Iustin Pop
    @param drbd_map: the used drbd minors for this node, in
985 6d2e83d5 Iustin Pop
        form of minor: (instance, must_exist) which correspond to instances
986 6d2e83d5 Iustin Pop
        and their running status
987 cc9e1230 Guido Trotter
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
988 098c0958 Michael Hanselmann

989 a8083063 Iustin Pop
    """
990 112f18a5 Iustin Pop
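    # Illustrative drbd_map shape (hypothetical values): minor numbers map to
    # (instance name, must_exist) pairs, e.g.
    #   {0: ("inst1.example.com", True), 1: ("inst2.example.com", False)}
    # with must_exist False for instances that are administratively down.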
    node = nodeinfo.name
991 a0c9776a Iustin Pop
    _ErrorIf = self._ErrorIf
992 25361b9a Iustin Pop
993 25361b9a Iustin Pop
    # main result, node_result should be a non-empty dict
994 a0c9776a Iustin Pop
    test = not node_result or not isinstance(node_result, dict)
995 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
996 7c874ee1 Iustin Pop
             "unable to verify node: no data returned")
997 a0c9776a Iustin Pop
    if test:
998 a0c9776a Iustin Pop
      return
999 25361b9a Iustin Pop
1000 a8083063 Iustin Pop
    # compares ganeti version
1001 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
1002 25361b9a Iustin Pop
    remote_version = node_result.get('version', None)
1003 a0c9776a Iustin Pop
    test = not (remote_version and
1004 a0c9776a Iustin Pop
                isinstance(remote_version, (list, tuple)) and
1005 a0c9776a Iustin Pop
                len(remote_version) == 2)
1006 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1007 a0c9776a Iustin Pop
             "connection to node returned invalid data")
1008 a0c9776a Iustin Pop
    if test:
1009 a0c9776a Iustin Pop
      return
1010 a0c9776a Iustin Pop
1011 a0c9776a Iustin Pop
    test = local_version != remote_version[0]
1012 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEVERSION, node,
1013 a0c9776a Iustin Pop
             "incompatible protocol versions: master %s,"
1014 a0c9776a Iustin Pop
             " node %s", local_version, remote_version[0])
1015 a0c9776a Iustin Pop
    if test:
1016 a0c9776a Iustin Pop
      return
1017 a8083063 Iustin Pop
1018 e9ce0a64 Iustin Pop
    # node seems compatible, we can actually try to look into its results
1019 a8083063 Iustin Pop
1020 e9ce0a64 Iustin Pop
    # full package version
1021 a0c9776a Iustin Pop
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1022 a0c9776a Iustin Pop
                  self.ENODEVERSION, node,
1023 7c874ee1 Iustin Pop
                  "software version mismatch: master %s, node %s",
1024 7c874ee1 Iustin Pop
                  constants.RELEASE_VERSION, remote_version[1],
1025 a0c9776a Iustin Pop
                  code=self.ETYPE_WARNING)
1026 e9ce0a64 Iustin Pop
1027 e9ce0a64 Iustin Pop
    # checks vg existence and size > 20G
1028 cc9e1230 Guido Trotter
    if vg_name is not None:
1029 cc9e1230 Guido Trotter
      vglist = node_result.get(constants.NV_VGLIST, None)
1030 a0c9776a Iustin Pop
      test = not vglist
1031 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1032 a0c9776a Iustin Pop
      if not test:
1033 cc9e1230 Guido Trotter
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1034 cc9e1230 Guido Trotter
                                              constants.MIN_VG_SIZE)
1035 a0c9776a Iustin Pop
        _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1036 a8083063 Iustin Pop
1037 a8083063 Iustin Pop
    # checks config file checksum
1038 a8083063 Iustin Pop
1039 25361b9a Iustin Pop
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
1040 a0c9776a Iustin Pop
    test = not isinstance(remote_cksum, dict)
1041 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEFILECHECK, node,
1042 a0c9776a Iustin Pop
             "node hasn't returned file checksum data")
1043 a0c9776a Iustin Pop
    if not test:
1044 a8083063 Iustin Pop
      for file_name in file_list:
1045 112f18a5 Iustin Pop
        node_is_mc = nodeinfo.master_candidate
1046 a0c9776a Iustin Pop
        must_have = (file_name not in master_files) or node_is_mc
1047 a0c9776a Iustin Pop
        # missing
1048 a0c9776a Iustin Pop
        test1 = file_name not in remote_cksum
1049 a0c9776a Iustin Pop
        # invalid checksum
1050 a0c9776a Iustin Pop
        test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1051 a0c9776a Iustin Pop
        # existing and good
1052 a0c9776a Iustin Pop
        test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1053 a0c9776a Iustin Pop
        _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1054 a0c9776a Iustin Pop
                 "file '%s' missing", file_name)
1055 a0c9776a Iustin Pop
        _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1056 a0c9776a Iustin Pop
                 "file '%s' has wrong checksum", file_name)
1057 a0c9776a Iustin Pop
        # not candidate and this is not a must-have file
1058 a0c9776a Iustin Pop
        _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1059 a0c9776a Iustin Pop
                 "file '%s' should not exist on non master"
1060 a0c9776a Iustin Pop
                 " candidates (and the file is outdated)", file_name)
1061 a0c9776a Iustin Pop
        # all good, except non-master/non-must have combination
1062 a0c9776a Iustin Pop
        _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1063 a0c9776a Iustin Pop
                 "file '%s' should not exist"
1064 a0c9776a Iustin Pop
                 " on non master candidates", file_name)
1065 a8083063 Iustin Pop
1066 25361b9a Iustin Pop
    # checks ssh to other nodes
1067 25361b9a Iustin Pop
1068 a0c9776a Iustin Pop
    test = constants.NV_NODELIST not in node_result
1069 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODESSH, node,
1070 a0c9776a Iustin Pop
             "node hasn't returned node ssh connectivity data")
1071 a0c9776a Iustin Pop
    if not test:
1072 25361b9a Iustin Pop
      if node_result[constants.NV_NODELIST]:
1073 7c874ee1 Iustin Pop
        for a_node, a_msg in node_result[constants.NV_NODELIST].items():
1074 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODESSH, node,
1075 a0c9776a Iustin Pop
                   "ssh communication with node '%s': %s", a_node, a_msg)
1076 25361b9a Iustin Pop
1077 a0c9776a Iustin Pop
    test = constants.NV_NODENETTEST not in node_result
1078 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODENET, node,
1079 a0c9776a Iustin Pop
             "node hasn't returned node tcp connectivity data")
1080 a0c9776a Iustin Pop
    if not test:
1081 25361b9a Iustin Pop
      if node_result[constants.NV_NODENETTEST]:
1082 25361b9a Iustin Pop
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
1083 7c874ee1 Iustin Pop
        for anode in nlist:
1084 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODENET, node,
1085 a0c9776a Iustin Pop
                   "tcp communication with node '%s': %s",
1086 a0c9776a Iustin Pop
                   anode, node_result[constants.NV_NODENETTEST][anode])
1087 9d4bfc96 Iustin Pop
1088 25361b9a Iustin Pop
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
1089 e69d05fd Iustin Pop
    if isinstance(hyp_result, dict):
1090 e69d05fd Iustin Pop
      for hv_name, hv_result in hyp_result.iteritems():
1091 a0c9776a Iustin Pop
        test = hv_result is not None
1092 a0c9776a Iustin Pop
        _ErrorIf(test, self.ENODEHV, node,
1093 a0c9776a Iustin Pop
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1094 6d2e83d5 Iustin Pop
1095 6d2e83d5 Iustin Pop
    # check used drbd list
1096 cc9e1230 Guido Trotter
    if vg_name is not None:
1097 cc9e1230 Guido Trotter
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
1098 a0c9776a Iustin Pop
      test = not isinstance(used_minors, (tuple, list))
1099 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1100 a0c9776a Iustin Pop
               "cannot parse drbd status file: %s", str(used_minors))
1101 a0c9776a Iustin Pop
      if not test:
1102 cc9e1230 Guido Trotter
        for minor, (iname, must_exist) in drbd_map.items():
1103 a0c9776a Iustin Pop
          test = minor not in used_minors and must_exist
1104 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODEDRBD, node,
1105 a0c9776a Iustin Pop
                   "drbd minor %d of instance %s is not active",
1106 a0c9776a Iustin Pop
                   minor, iname)
1107 cc9e1230 Guido Trotter
        for minor in used_minors:
1108 a0c9776a Iustin Pop
          test = minor not in drbd_map
1109 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODEDRBD, node,
1110 a0c9776a Iustin Pop
                   "unallocated drbd minor %d is in use", minor)
1111 a8083063 Iustin Pop
1112 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
1113 7c874ee1 Iustin Pop
                      node_instance, n_offline):
1114 a8083063 Iustin Pop
    """Verify an instance.
1115 a8083063 Iustin Pop

1116 a8083063 Iustin Pop
    This function checks to see if the required block devices are
1117 a8083063 Iustin Pop
    available on the instance's node.
1118 a8083063 Iustin Pop

1119 a8083063 Iustin Pop
    """
1120 a0c9776a Iustin Pop
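    # Illustrative input shapes (hypothetical names): node_vol_is maps node
    # names to the LVs reported on them, e.g.
    #   {"node1.example.com": {"xenvg/disk0": ...}}
    # and node_instance maps node names to the instances reported as running
    # there, e.g. {"node1.example.com": ["inst1.example.com"]}.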
    _ErrorIf = self._ErrorIf
1121 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
1122 a8083063 Iustin Pop
1123 a8083063 Iustin Pop
    node_vol_should = {}
1124 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
1125 a8083063 Iustin Pop
1126 a8083063 Iustin Pop
    for node in node_vol_should:
1127 0a66c968 Iustin Pop
      if node in n_offline:
1128 0a66c968 Iustin Pop
        # ignore missing volumes on offline nodes
1129 0a66c968 Iustin Pop
        continue
1130 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
1131 a0c9776a Iustin Pop
        test = node not in node_vol_is or volume not in node_vol_is[node]
1132 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1133 a0c9776a Iustin Pop
                 "volume %s missing on node %s", volume, node)
1134 a8083063 Iustin Pop
1135 0d68c45d Iustin Pop
    if instanceconfig.admin_up:
1136 a0c9776a Iustin Pop
      test = ((node_current not in node_instance or
1137 a0c9776a Iustin Pop
               not instance in node_instance[node_current]) and
1138 a0c9776a Iustin Pop
              node_current not in n_offline)
1139 a0c9776a Iustin Pop
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1140 a0c9776a Iustin Pop
               "instance not running on its primary node %s",
1141 a0c9776a Iustin Pop
               node_current)
1142 a8083063 Iustin Pop
1143 a8083063 Iustin Pop
    for node in node_instance:
1144 a8083063 Iustin Pop
      if node != node_current:
1145 a0c9776a Iustin Pop
        test = instance in node_instance[node]
1146 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1147 a0c9776a Iustin Pop
                 "instance should not run on node %s", node)
1148 a8083063 Iustin Pop
1149 7c874ee1 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is):
1150 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
1151 a8083063 Iustin Pop

1152 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
1153 a8083063 Iustin Pop
    reported as unknown.
1154 a8083063 Iustin Pop

1155 a8083063 Iustin Pop
    """
1156 a8083063 Iustin Pop
    for node in node_vol_is:
1157 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
1158 a0c9776a Iustin Pop
        test = (node not in node_vol_should or
1159 a0c9776a Iustin Pop
                volume not in node_vol_should[node])
1160 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1161 7c874ee1 Iustin Pop
                      "volume %s is unknown", volume)
1162 a8083063 Iustin Pop
1163 7c874ee1 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance):
1164 a8083063 Iustin Pop
    """Verify the list of running instances.
1165 a8083063 Iustin Pop

1166 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
1167 a8083063 Iustin Pop

1168 a8083063 Iustin Pop
    """
1169 a8083063 Iustin Pop
    for node in node_instance:
1170 7c874ee1 Iustin Pop
      for o_inst in node_instance[node]:
1171 a0c9776a Iustin Pop
        test = o_inst not in instancelist
1172 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1173 7c874ee1 Iustin Pop
                      "instance %s on node %s should not exist", o_inst, node)
1174 a8083063 Iustin Pop
1175 7c874ee1 Iustin Pop
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg):
1176 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
1177 2b3b6ddd Guido Trotter

1178 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
1179 2b3b6ddd Guido Trotter
    was primary for.
1180 2b3b6ddd Guido Trotter

1181 2b3b6ddd Guido Trotter
    """
1182 2b3b6ddd Guido Trotter
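    # Worked example (illustrative numbers): if this node is secondary for two
    # auto-balanced instances whose primary is node1 and whose BE_MEMORY
    # values are 1024 and 2048, needed_mem is 3072; an error is reported when
    # the node's "mfree" is below that, since a failover from node1 could not
    # be accommodated.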
    for node, nodeinfo in node_info.iteritems():
1183 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
1184 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to, should a single
1185 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
1186 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
1187 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
1188 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
1189 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
1190 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
1191 2b3b6ddd Guido Trotter
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
1192 2b3b6ddd Guido Trotter
        needed_mem = 0
1193 2b3b6ddd Guido Trotter
        for instance in instances:
1194 338e51e8 Iustin Pop
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1195 c0f2b229 Iustin Pop
          if bep[constants.BE_AUTO_BALANCE]:
1196 3924700f Iustin Pop
            needed_mem += bep[constants.BE_MEMORY]
1197 a0c9776a Iustin Pop
        test = nodeinfo['mfree'] < needed_mem
1198 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEN1, node,
1199 7c874ee1 Iustin Pop
                      "not enough memory to accommodate"
1200 7c874ee1 Iustin Pop
                      " failovers should peer node %s fail", prinode)
1201 2b3b6ddd Guido Trotter
1202 a8083063 Iustin Pop
  def CheckPrereq(self):
1203 a8083063 Iustin Pop
    """Check prerequisites.
1204 a8083063 Iustin Pop

1205 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
1206 e54c4c5e Guido Trotter
    all its members are valid.
1207 a8083063 Iustin Pop

1208 a8083063 Iustin Pop
    """
1209 e54c4c5e Guido Trotter
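    # For example (illustrative only), an opcode submitted with
    # skip_checks=[constants.VERIFY_NPLUSONE_MEM] makes Exec skip the N+1
    # memory check; anything outside constants.VERIFY_OPTIONAL_CHECKS is
    # rejected here.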
    self.skip_set = frozenset(self.op.skip_checks)
1210 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
1211 e54c4c5e Guido Trotter
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
1212 a8083063 Iustin Pop
1213 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
1214 d8fff41c Guido Trotter
    """Build hooks env.
1215 d8fff41c Guido Trotter

1216 5bbd3f7f Michael Hanselmann
    Cluster-Verify hooks are run in the post phase only; their failure causes
1217 d8fff41c Guido Trotter
    the output to be logged in the verify output and the verification to fail.
1218 d8fff41c Guido Trotter

1219 d8fff41c Guido Trotter
    """
1220 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
1221 35e994e9 Iustin Pop
    env = {
1222 35e994e9 Iustin Pop
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
1223 35e994e9 Iustin Pop
      }
1224 35e994e9 Iustin Pop
    for node in self.cfg.GetAllNodesInfo().values():
1225 35e994e9 Iustin Pop
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
1226 35e994e9 Iustin Pop
1227 d8fff41c Guido Trotter
    return env, [], all_nodes
1228 d8fff41c Guido Trotter
1229 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1230 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various tests on nodes.
1231 a8083063 Iustin Pop

1232 a8083063 Iustin Pop
    """
1233 a0c9776a Iustin Pop
    self.bad = False
1234 a0c9776a Iustin Pop
    _ErrorIf = self._ErrorIf
1235 7c874ee1 Iustin Pop
    verbose = self.op.verbose
1236 7c874ee1 Iustin Pop
    self._feedback_fn = feedback_fn
1237 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
1238 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
1239 a0c9776a Iustin Pop
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
1240 a8083063 Iustin Pop
1241 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
1242 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
1243 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
1244 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
1245 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
1246 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
1247 6d2e83d5 Iustin Pop
                        for iname in instancelist)
1248 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
1249 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
1250 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
1251 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
1252 a8083063 Iustin Pop
    node_volume = {}
1253 a8083063 Iustin Pop
    node_instance = {}
1254 9c9c7d30 Guido Trotter
    node_info = {}
1255 26b6af5e Guido Trotter
    instance_cfg = {}
1256 a8083063 Iustin Pop
1257 a8083063 Iustin Pop
    # FIXME: verify OS list
1258 a8083063 Iustin Pop
    # do local checksums
1259 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
1260 112f18a5 Iustin Pop
1261 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
1262 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
1263 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
1264 112f18a5 Iustin Pop
    file_names.extend(master_files)
1265 112f18a5 Iustin Pop
1266 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
1267 a8083063 Iustin Pop
1268 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1269 a8083063 Iustin Pop
    node_verify_param = {
1270 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
1271 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
1272 82e37788 Iustin Pop
                              if not node.offline],
1273 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1274 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1275 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1276 82e37788 Iustin Pop
                                 if not node.offline],
1277 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1278 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1279 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1280 a8083063 Iustin Pop
      }
1281 cc9e1230 Guido Trotter
    if vg_name is not None:
1282 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1283 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1284 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
1285 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1286 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1287 a8083063 Iustin Pop
1288 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1289 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1290 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1291 6d2e83d5 Iustin Pop
1292 7c874ee1 Iustin Pop
    feedback_fn("* Verifying node status")
1293 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1294 112f18a5 Iustin Pop
      node = node_i.name
1295 25361b9a Iustin Pop
1296 0a66c968 Iustin Pop
      if node_i.offline:
1297 7c874ee1 Iustin Pop
        if verbose:
1298 7c874ee1 Iustin Pop
          feedback_fn("* Skipping offline node %s" % (node,))
1299 0a66c968 Iustin Pop
        n_offline.append(node)
1300 0a66c968 Iustin Pop
        continue
1301 0a66c968 Iustin Pop
1302 112f18a5 Iustin Pop
      if node == master_node:
1303 25361b9a Iustin Pop
        ntype = "master"
1304 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1305 25361b9a Iustin Pop
        ntype = "master candidate"
1306 22f0f71d Iustin Pop
      elif node_i.drained:
1307 22f0f71d Iustin Pop
        ntype = "drained"
1308 22f0f71d Iustin Pop
        n_drained.append(node)
1309 112f18a5 Iustin Pop
      else:
1310 25361b9a Iustin Pop
        ntype = "regular"
1311 7c874ee1 Iustin Pop
      if verbose:
1312 7c874ee1 Iustin Pop
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1313 25361b9a Iustin Pop
1314 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1315 a0c9776a Iustin Pop
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
1316 6f68a739 Iustin Pop
      if msg:
1317 25361b9a Iustin Pop
        continue
1318 25361b9a Iustin Pop
1319 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1320 6d2e83d5 Iustin Pop
      node_drbd = {}
1321 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1322 a0c9776a Iustin Pop
        test = instance not in instanceinfo
1323 a0c9776a Iustin Pop
        _ErrorIf(test, self.ECLUSTERCFG, None,
1324 a0c9776a Iustin Pop
                 "ghost instance '%s' in temporary DRBD map", instance)
1325 c614e5fb Iustin Pop
        # ghost instance should not be running, but otherwise we
1326 c614e5fb Iustin Pop
        # don't give double warnings (both ghost instance and
1327 c614e5fb Iustin Pop
        # unallocated minor in use)
1328 a0c9776a Iustin Pop
        if test:
1329 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1330 c614e5fb Iustin Pop
        else:
1331 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1332 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
1333 a0c9776a Iustin Pop
      self._VerifyNode(node_i, file_names, local_checksums,
1334 a0c9776a Iustin Pop
                       nresult, master_files, node_drbd, vg_name)
1335 a8083063 Iustin Pop
1336 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1337 cc9e1230 Guido Trotter
      if vg_name is None:
1338 cc9e1230 Guido Trotter
        node_volume[node] = {}
1339 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1340 a0c9776a Iustin Pop
        _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1341 a0c9776a Iustin Pop
                 utils.SafeEncode(lvdata))
1342 b63ed789 Iustin Pop
        node_volume[node] = {}
1343 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1344 a0c9776a Iustin Pop
        _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1345 a8083063 Iustin Pop
        continue
1346 b63ed789 Iustin Pop
      else:
1347 25361b9a Iustin Pop
        node_volume[node] = lvdata
1348 a8083063 Iustin Pop
1349 a8083063 Iustin Pop
      # node_instance
1350 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1351 a0c9776a Iustin Pop
      test = not isinstance(idata, list)
1352 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODEHV, node,
1353 a0c9776a Iustin Pop
               "rpc call to node failed (instancelist)")
1354 a0c9776a Iustin Pop
      if test:
1355 a8083063 Iustin Pop
        continue
1356 a8083063 Iustin Pop
1357 25361b9a Iustin Pop
      node_instance[node] = idata
1358 a8083063 Iustin Pop
1359 9c9c7d30 Guido Trotter
      # node_info
1360 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1361 a0c9776a Iustin Pop
      test = not isinstance(nodeinfo, dict)
1362 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1363 a0c9776a Iustin Pop
      if test:
1364 9c9c7d30 Guido Trotter
        continue
1365 9c9c7d30 Guido Trotter
1366 9c9c7d30 Guido Trotter
      try:
1367 9c9c7d30 Guido Trotter
        node_info[node] = {
1368 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1369 93e4c50b Guido Trotter
          "pinst": [],
1370 93e4c50b Guido Trotter
          "sinst": [],
1371 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1372 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1373 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1374 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1375 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1376 36e7da50 Guido Trotter
          # secondary.
1377 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1378 9c9c7d30 Guido Trotter
        }
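        # Illustrative final shape of a node_info entry (hypothetical data):
        #   {"mfree": 4096, "pinst": ["inst1"], "sinst": ["inst2"],
        #    "sinst-by-pnode": {"node1.example.com": ["inst2"]}, "dfree": 20480}
        # where "dfree" is only added below when LVM data is available.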
1379 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1380 cc9e1230 Guido Trotter
        if vg_name is not None:
1381 a0c9776a Iustin Pop
          test = (constants.NV_VGLIST not in nresult or
1382 a0c9776a Iustin Pop
                  vg_name not in nresult[constants.NV_VGLIST])
1383 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODELVM, node,
1384 a0c9776a Iustin Pop
                   "node didn't return data for the volume group '%s'"
1385 a0c9776a Iustin Pop
                   " - it is either missing or broken", vg_name)
1386 a0c9776a Iustin Pop
          if test:
1387 9a198532 Iustin Pop
            continue
1388 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1389 9a198532 Iustin Pop
      except (ValueError, KeyError):
1390 a0c9776a Iustin Pop
        _ErrorIf(True, self.ENODERPC, node,
1391 a0c9776a Iustin Pop
                 "node returned invalid nodeinfo, check lvm/hypervisor")
1392 9c9c7d30 Guido Trotter
        continue
1393 9c9c7d30 Guido Trotter
1394 a8083063 Iustin Pop
    node_vol_should = {}
1395 a8083063 Iustin Pop
1396 7c874ee1 Iustin Pop
    feedback_fn("* Verifying instance status")
1397 a8083063 Iustin Pop
    for instance in instancelist:
1398 7c874ee1 Iustin Pop
      if verbose:
1399 7c874ee1 Iustin Pop
        feedback_fn("* Verifying instance %s" % instance)
1400 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1401 a0c9776a Iustin Pop
      self._VerifyInstance(instance, inst_config, node_volume,
1402 a0c9776a Iustin Pop
                           node_instance, n_offline)
1403 832261fd Iustin Pop
      inst_nodes_offline = []
1404 a8083063 Iustin Pop
1405 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1406 a8083063 Iustin Pop
1407 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1408 26b6af5e Guido Trotter
1409 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1410 a0c9776a Iustin Pop
      _ErrorIf(pnode not in node_info and pnode not in n_offline,
1411 a0c9776a Iustin Pop
               self.ENODERPC, pnode, "instance %s, connection to"
1412 a0c9776a Iustin Pop
               " primary node failed", instance)
1413 93e4c50b Guido Trotter
      if pnode in node_info:
1414 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1415 93e4c50b Guido Trotter
1416 832261fd Iustin Pop
      if pnode in n_offline:
1417 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1418 832261fd Iustin Pop
1419 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1420 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1421 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1422 93e4c50b Guido Trotter
      # supported either.
1423 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1424 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1425 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1426 a0c9776a Iustin Pop
      _ErrorIf(len(inst_config.secondary_nodes) > 1,
1427 a0c9776a Iustin Pop
               self.EINSTANCELAYOUT, instance,
1428 a0c9776a Iustin Pop
               "instance has multiple secondary nodes", code="WARNING")
1429 93e4c50b Guido Trotter
1430 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1431 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1432 3924700f Iustin Pop
1433 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1434 a0c9776a Iustin Pop
        _ErrorIf(snode not in node_info and snode not in n_offline,
1435 a0c9776a Iustin Pop
                 self.ENODERPC, snode,
1436 a0c9776a Iustin Pop
                 "instance %s, connection to secondary node"
1437 a0c9776a Iustin Pop
                 " failed", instance)
1438 a0c9776a Iustin Pop
1439 93e4c50b Guido Trotter
        if snode in node_info:
1440 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1441 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1442 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1443 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1444 a0c9776a Iustin Pop
1445 832261fd Iustin Pop
        if snode in n_offline:
1446 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1447 832261fd Iustin Pop
1448 a0c9776a Iustin Pop
      # warn that the instance lives on offline nodes
1449 a0c9776a Iustin Pop
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
1450 a0c9776a Iustin Pop
               "instance lives on offline node(s) %s",
1451 a0c9776a Iustin Pop
               ", ".join(inst_nodes_offline))
1452 93e4c50b Guido Trotter
1453 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1454 a0c9776a Iustin Pop
    self._VerifyOrphanVolumes(node_vol_should, node_volume)
1455 a8083063 Iustin Pop
1456 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1457 a0c9776a Iustin Pop
    self._VerifyOrphanInstances(instancelist, node_instance)
1458 a8083063 Iustin Pop
1459 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1460 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1461 a0c9776a Iustin Pop
      self._VerifyNPlusOneMemory(node_info, instance_cfg)
1462 2b3b6ddd Guido Trotter
1463 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1464 2b3b6ddd Guido Trotter
    if i_non_redundant:
1465 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1466 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1467 2b3b6ddd Guido Trotter
1468 3924700f Iustin Pop
    if i_non_a_balanced:
1469 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1470 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1471 3924700f Iustin Pop
1472 0a66c968 Iustin Pop
    if n_offline:
1473 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1474 0a66c968 Iustin Pop
1475 22f0f71d Iustin Pop
    if n_drained:
1476 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1477 22f0f71d Iustin Pop
1478 a0c9776a Iustin Pop
    return not self.bad
1479 a8083063 Iustin Pop
1480 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1481 5bbd3f7f Michael Hanselmann
    """Analyze the post-hooks' result
1482 e4376078 Iustin Pop

1483 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
1484 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
1485 d8fff41c Guido Trotter

1486 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1487 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1488 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
1489 e4376078 Iustin Pop
    @param feedback_fn: function used to send feedback back to the caller
1490 e4376078 Iustin Pop
    @param lu_result: previous Exec result
1491 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
1492 e4376078 Iustin Pop
        and hook results
1493 d8fff41c Guido Trotter

1494 d8fff41c Guido Trotter
    """
1495 38206f3c Iustin Pop
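    # Illustrative hooks_results shape (hypothetical script name): each node
    # name maps to an RPC result whose payload is a list of
    # (script, status, output) tuples, e.g.
    #   [("10-example-hook", constants.HKR_FAIL, "some hook output")]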
    # We only really run POST phase hooks, and are only interested in
1496 38206f3c Iustin Pop
    # their results
1497 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
1498 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
1499 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
1500 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
1501 7c874ee1 Iustin Pop
      assert hooks_results, "invalid result from hooks"
1502 7c874ee1 Iustin Pop
1503 7c874ee1 Iustin Pop
      for node_name in hooks_results:
1504 7c874ee1 Iustin Pop
        show_node_header = True
1505 7c874ee1 Iustin Pop
        res = hooks_results[node_name]
1506 7c874ee1 Iustin Pop
        msg = res.fail_msg
1507 a0c9776a Iustin Pop
        test = msg and not res.offline
1508 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
1509 7c874ee1 Iustin Pop
                      "Communication failure in hooks execution: %s", msg)
1510 a0c9776a Iustin Pop
        if test:
1511 a0c9776a Iustin Pop
          # override manually lu_result here as _ErrorIf only
1512 a0c9776a Iustin Pop
          # overrides self.bad
1513 7c874ee1 Iustin Pop
          lu_result = 1
1514 7c874ee1 Iustin Pop
          continue
1515 7c874ee1 Iustin Pop
        for script, hkr, output in res.payload:
1516 a0c9776a Iustin Pop
          test = hkr == constants.HKR_FAIL
1517 a0c9776a Iustin Pop
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
1518 7c874ee1 Iustin Pop
                        "Script %s failed, output:", script)
1519 a0c9776a Iustin Pop
          if test:
1520 7c874ee1 Iustin Pop
            output = indent_re.sub('      ', output)
1521 7c874ee1 Iustin Pop
            feedback_fn("%s" % output)
1522 7c874ee1 Iustin Pop
            lu_result = 1
1523 d8fff41c Guido Trotter
1524 d8fff41c Guido Trotter
      return lu_result
1525 d8fff41c Guido Trotter
1526 a8083063 Iustin Pop
1527 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1528 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1529 2c95a8d4 Iustin Pop

1530 2c95a8d4 Iustin Pop
  """
1531 2c95a8d4 Iustin Pop
  _OP_REQP = []
1532 d4b9d97f Guido Trotter
  REQ_BGL = False
1533 d4b9d97f Guido Trotter
1534 d4b9d97f Guido Trotter
  def ExpandNames(self):
1535 d4b9d97f Guido Trotter
    self.needed_locks = {
1536 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1537 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1538 d4b9d97f Guido Trotter
    }
1539 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1540 2c95a8d4 Iustin Pop
1541 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1542 2c95a8d4 Iustin Pop
    """Check prerequisites.
1543 2c95a8d4 Iustin Pop

1544 2c95a8d4 Iustin Pop
    This has no prerequisites.
1545 2c95a8d4 Iustin Pop

1546 2c95a8d4 Iustin Pop
    """
1547 2c95a8d4 Iustin Pop
    pass
1548 2c95a8d4 Iustin Pop
1549 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1550 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1551 2c95a8d4 Iustin Pop

1552 29d376ec Iustin Pop
    @rtype: tuple of three items
1553 29d376ec Iustin Pop
    @return: a tuple of (dict of node-to-node_error, list of instances
1554 29d376ec Iustin Pop
        which need activate-disks, dict of instance: (node, volume) for
1555 29d376ec Iustin Pop
        missing volumes)
1556 29d376ec Iustin Pop

1557 2c95a8d4 Iustin Pop
    """
1558 29d376ec Iustin Pop
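    # Illustrative return value (hypothetical names):
    #   ({"node3.example.com": "rpc failure message"},
    #    ["inst1.example.com"],
    #    {"inst2.example.com": [("node1.example.com", "xenvg/disk0")]})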
    result = res_nodes, res_instances, res_missing = {}, [], {}
1559 2c95a8d4 Iustin Pop
1560 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1561 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1562 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1563 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1564 2c95a8d4 Iustin Pop
1565 2c95a8d4 Iustin Pop
    nv_dict = {}
1566 2c95a8d4 Iustin Pop
    for inst in instances:
1567 2c95a8d4 Iustin Pop
      inst_lvs = {}
1568 0d68c45d Iustin Pop
      if (not inst.admin_up or
1569 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1570 2c95a8d4 Iustin Pop
        continue
1571 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1572 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1573 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1574 2c95a8d4 Iustin Pop
        for vol in vol_list:
1575 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1576 2c95a8d4 Iustin Pop
1577 2c95a8d4 Iustin Pop
    if not nv_dict:
1578 2c95a8d4 Iustin Pop
      return result
1579 2c95a8d4 Iustin Pop
1580 b2a6ccd4 Iustin Pop
    node_lvs = self.rpc.call_lv_list(nodes, vg_name)
1581 2c95a8d4 Iustin Pop
1582 2c95a8d4 Iustin Pop
    for node in nodes:
1583 2c95a8d4 Iustin Pop
      # node_volume
1584 29d376ec Iustin Pop
      node_res = node_lvs[node]
1585 29d376ec Iustin Pop
      if node_res.offline:
1586 ea9ddc07 Iustin Pop
        continue
1587 4c4e4e1e Iustin Pop
      msg = node_res.fail_msg
1588 29d376ec Iustin Pop
      if msg:
1589 29d376ec Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
1590 29d376ec Iustin Pop
        res_nodes[node] = msg
1591 2c95a8d4 Iustin Pop
        continue
1592 2c95a8d4 Iustin Pop
1593 29d376ec Iustin Pop
      lvs = node_res.payload
1594 29d376ec Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.items():
1595 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1596 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1597 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1598 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1599 2c95a8d4 Iustin Pop
1600 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1601 b63ed789 Iustin Pop
    # data better
1602 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1603 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1604 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1605 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1606 b63ed789 Iustin Pop
1607 2c95a8d4 Iustin Pop
    return result
1608 2c95a8d4 Iustin Pop
1609 2c95a8d4 Iustin Pop
1610 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
1611 60975797 Iustin Pop
  """Verifies the cluster disks sizes.
1612 60975797 Iustin Pop

1613 60975797 Iustin Pop
  """
1614 60975797 Iustin Pop
  _OP_REQP = ["instances"]
1615 60975797 Iustin Pop
  REQ_BGL = False
1616 60975797 Iustin Pop
1617 60975797 Iustin Pop
  def ExpandNames(self):
1618 60975797 Iustin Pop
    if not isinstance(self.op.instances, list):
1619 60975797 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'")
1620 60975797 Iustin Pop
1621 60975797 Iustin Pop
    if self.op.instances:
1622 60975797 Iustin Pop
      self.wanted_names = []
1623 60975797 Iustin Pop
      for name in self.op.instances:
1624 60975797 Iustin Pop
        full_name = self.cfg.ExpandInstanceName(name)
1625 60975797 Iustin Pop
        if full_name is None:
1626 60975797 Iustin Pop
          raise errors.OpPrereqError("Instance '%s' not known" % name)
1627 60975797 Iustin Pop
        self.wanted_names.append(full_name)
1628 60975797 Iustin Pop
      self.needed_locks = {
1629 60975797 Iustin Pop
        locking.LEVEL_NODE: [],
1630 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: self.wanted_names,
1631 60975797 Iustin Pop
        }
1632 60975797 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1633 60975797 Iustin Pop
    else:
1634 60975797 Iustin Pop
      self.wanted_names = None
1635 60975797 Iustin Pop
      self.needed_locks = {
1636 60975797 Iustin Pop
        locking.LEVEL_NODE: locking.ALL_SET,
1637 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: locking.ALL_SET,
1638 60975797 Iustin Pop
        }
1639 60975797 Iustin Pop
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1640 60975797 Iustin Pop
1641 60975797 Iustin Pop
  def DeclareLocks(self, level):
1642 60975797 Iustin Pop
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
1643 60975797 Iustin Pop
      self._LockInstancesNodes(primary_only=True)
1644 60975797 Iustin Pop
1645 60975797 Iustin Pop
  def CheckPrereq(self):
1646 60975797 Iustin Pop
    """Check prerequisites.
1647 60975797 Iustin Pop

1648 60975797 Iustin Pop
    This only checks the optional instance list against the existing names.
1649 60975797 Iustin Pop

1650 60975797 Iustin Pop
    """
1651 60975797 Iustin Pop
    if self.wanted_names is None:
1652 60975797 Iustin Pop
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
1653 60975797 Iustin Pop
1654 60975797 Iustin Pop
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
1655 60975797 Iustin Pop
                             in self.wanted_names]
1656 60975797 Iustin Pop
1657 b775c337 Iustin Pop
  def _EnsureChildSizes(self, disk):
1658 b775c337 Iustin Pop
    """Ensure children of the disk have the needed disk size.
1659 b775c337 Iustin Pop

1660 b775c337 Iustin Pop
    This is valid mainly for DRBD8 and fixes an issue where the
1661 b775c337 Iustin Pop
    children have smaller disk size.
1662 b775c337 Iustin Pop

1663 b775c337 Iustin Pop
    @param disk: an L{ganeti.objects.Disk} object
1664 b775c337 Iustin Pop

1665 b775c337 Iustin Pop
    """
1666 b775c337 Iustin Pop
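    # Illustrative example (hypothetical sizes): for a DRBD8 disk recorded at
    # 10240 whose data child was recorded at 10112, the child is grown to
    # 10240 and True is returned, telling the caller that the configuration
    # needs to be updated.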
    if disk.dev_type == constants.LD_DRBD8:
1667 b775c337 Iustin Pop
      assert disk.children, "Empty children for DRBD8?"
1668 b775c337 Iustin Pop
      fchild = disk.children[0]
1669 b775c337 Iustin Pop
      mismatch = fchild.size < disk.size
1670 b775c337 Iustin Pop
      if mismatch:
1671 b775c337 Iustin Pop
        self.LogInfo("Child disk has size %d, parent %d, fixing",
1672 b775c337 Iustin Pop
                     fchild.size, disk.size)
1673 b775c337 Iustin Pop
        fchild.size = disk.size
1674 b775c337 Iustin Pop
1675 b775c337 Iustin Pop
      # and we recurse on this child only, not on the metadev
1676 b775c337 Iustin Pop
      return self._EnsureChildSizes(fchild) or mismatch
1677 b775c337 Iustin Pop
    else:
1678 b775c337 Iustin Pop
      return False
1679 b775c337 Iustin Pop
1680 60975797 Iustin Pop
  def Exec(self, feedback_fn):
1681 60975797 Iustin Pop
    """Verify the size of cluster disks.
1682 60975797 Iustin Pop

1683 60975797 Iustin Pop
    """
1684 60975797 Iustin Pop
    # TODO: check child disks too
1685 60975797 Iustin Pop
    # TODO: check differences in size between primary/secondary nodes
1686 60975797 Iustin Pop
    per_node_disks = {}
1687 60975797 Iustin Pop
    for instance in self.wanted_instances:
1688 60975797 Iustin Pop
      pnode = instance.primary_node
1689 60975797 Iustin Pop
      if pnode not in per_node_disks:
1690 60975797 Iustin Pop
        per_node_disks[pnode] = []
1691 60975797 Iustin Pop
      for idx, disk in enumerate(instance.disks):
1692 60975797 Iustin Pop
        per_node_disks[pnode].append((instance, idx, disk))
1693 60975797 Iustin Pop
1694 60975797 Iustin Pop
    changed = []
1695 60975797 Iustin Pop
    for node, dskl in per_node_disks.items():
1696 4d9e6835 Iustin Pop
      newl = [v[2].Copy() for v in dskl]
1697 4d9e6835 Iustin Pop
      for dsk in newl:
1698 4d9e6835 Iustin Pop
        self.cfg.SetDiskID(dsk, node)
1699 4d9e6835 Iustin Pop
      result = self.rpc.call_blockdev_getsizes(node, newl)
1700 3cebe102 Michael Hanselmann
      if result.fail_msg:
1701 60975797 Iustin Pop
        self.LogWarning("Failure in blockdev_getsizes call to node"
1702 60975797 Iustin Pop
                        " %s, ignoring", node)
1703 60975797 Iustin Pop
        continue
1704 60975797 Iustin Pop
      if len(result.data) != len(dskl):
1705 60975797 Iustin Pop
        self.LogWarning("Invalid result from node %s, ignoring node results",
1706 60975797 Iustin Pop
                        node)
1707 60975797 Iustin Pop
        continue
1708 60975797 Iustin Pop
      for ((instance, idx, disk), size) in zip(dskl, result.data):
1709 60975797 Iustin Pop
        if size is None:
1710 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return size"
1711 60975797 Iustin Pop
                          " information, ignoring", idx, instance.name)
1712 60975797 Iustin Pop
          continue
1713 60975797 Iustin Pop
        if not isinstance(size, (int, long)):
1714 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return valid"
1715 60975797 Iustin Pop
                          " size information, ignoring", idx, instance.name)
1716 60975797 Iustin Pop
          continue
1717 60975797 Iustin Pop
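        # convert the reported size (apparently in bytes) to MiB, the unit
        # used for disk.size below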
        size = size >> 20
1718 60975797 Iustin Pop
        if size != disk.size:
1719 60975797 Iustin Pop
          self.LogInfo("Disk %d of instance %s has mismatched size,"
1720 60975797 Iustin Pop
                       " correcting: recorded %d, actual %d", idx,
1721 60975797 Iustin Pop
                       instance.name, disk.size, size)
1722 60975797 Iustin Pop
          disk.size = size
1723 60975797 Iustin Pop
          self.cfg.Update(instance)
1724 60975797 Iustin Pop
          changed.append((instance.name, idx, size))
1725 b775c337 Iustin Pop
        if self._EnsureChildSizes(disk):
1726 b775c337 Iustin Pop
          self.cfg.Update(instance)
1727 b775c337 Iustin Pop
          changed.append((instance.name, idx, disk.size))
1728 60975797 Iustin Pop
    return changed
1729 60975797 Iustin Pop
1730 60975797 Iustin Pop
1731 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1732 07bd8a51 Iustin Pop
  """Rename the cluster.
1733 07bd8a51 Iustin Pop

1734 07bd8a51 Iustin Pop
  """
1735 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1736 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1737 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1738 07bd8a51 Iustin Pop
1739 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1740 07bd8a51 Iustin Pop
    """Build hooks env.
1741 07bd8a51 Iustin Pop

1742 07bd8a51 Iustin Pop
    """
1743 07bd8a51 Iustin Pop
    env = {
1744 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1745 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1746 07bd8a51 Iustin Pop
      }
1747 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1748 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1749 07bd8a51 Iustin Pop
1750 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1751 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1752 07bd8a51 Iustin Pop

1753 07bd8a51 Iustin Pop
    """
1754 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1755 07bd8a51 Iustin Pop
1756 bcf043c9 Iustin Pop
    new_name = hostname.name
1757 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1758 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
1759 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
1760 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1761 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1762 07bd8a51 Iustin Pop
                                 " cluster has changed")
1763 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1764 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1765 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1766 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1767 07bd8a51 Iustin Pop
                                   new_ip)
1768 07bd8a51 Iustin Pop
1769 07bd8a51 Iustin Pop
    self.op.name = new_name
1770 07bd8a51 Iustin Pop
1771 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1772 07bd8a51 Iustin Pop
    """Rename the cluster.
1773 07bd8a51 Iustin Pop

1774 07bd8a51 Iustin Pop
    """
1775 07bd8a51 Iustin Pop
    clustername = self.op.name
1776 07bd8a51 Iustin Pop
    ip = self.ip
1777 07bd8a51 Iustin Pop
1778 07bd8a51 Iustin Pop
    # shutdown the master IP
1779 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
1780 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
1781 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
1782 07bd8a51 Iustin Pop
1783 07bd8a51 Iustin Pop
    try:
1784 55cf7d83 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
1785 55cf7d83 Iustin Pop
      cluster.cluster_name = clustername
1786 55cf7d83 Iustin Pop
      cluster.master_ip = ip
1787 55cf7d83 Iustin Pop
      self.cfg.Update(cluster)
1788 ec85e3d5 Iustin Pop
1789 ec85e3d5 Iustin Pop
      # update the known hosts file
1790 ec85e3d5 Iustin Pop
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
1791 ec85e3d5 Iustin Pop
      node_list = self.cfg.GetNodeList()
1792 ec85e3d5 Iustin Pop
      try:
1793 ec85e3d5 Iustin Pop
        node_list.remove(master)
1794 ec85e3d5 Iustin Pop
      except ValueError:
1795 ec85e3d5 Iustin Pop
        pass
1796 ec85e3d5 Iustin Pop
      result = self.rpc.call_upload_file(node_list,
1797 ec85e3d5 Iustin Pop
                                         constants.SSH_KNOWN_HOSTS_FILE)
1798 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
1799 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
1800 6f7d4e75 Iustin Pop
        if msg:
1801 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
1802 6f7d4e75 Iustin Pop
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
1803 6f7d4e75 Iustin Pop
          self.proc.LogWarning(msg)
1804 ec85e3d5 Iustin Pop
1805 07bd8a51 Iustin Pop
    finally:
1806 3583908a Guido Trotter
      result = self.rpc.call_node_start_master(master, False, False)
1807 4c4e4e1e Iustin Pop
      msg = result.fail_msg
1808 b726aff0 Iustin Pop
      if msg:
1809 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
1810 b726aff0 Iustin Pop
                        " the master, please restart manually: %s", msg)
1811 07bd8a51 Iustin Pop
1812 07bd8a51 Iustin Pop
1813 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1814 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1815 8084f9f6 Manuel Franceschini

1816 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
1817 e4376078 Iustin Pop
  @param disk: the disk to check
1818 5bbd3f7f Michael Hanselmann
  @rtype: boolean
1819 e4376078 Iustin Pop
  @return: boolean indicating whether a LD_LV dev_type was found or not
1820 8084f9f6 Manuel Franceschini

1821 8084f9f6 Manuel Franceschini
  """
1822 8084f9f6 Manuel Franceschini
  if disk.children:
1823 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1824 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1825 8084f9f6 Manuel Franceschini
        return True
1826 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
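# Illustrative behaviour (hypothetical objects): a DRBD8 disk whose children
# are LD_LV devices makes this return True through the recursion, while a
# file-based disk without children returns False.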
1827 8084f9f6 Manuel Franceschini
1828 8084f9f6 Manuel Franceschini
1829 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1830 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1831 8084f9f6 Manuel Franceschini

1832 8084f9f6 Manuel Franceschini
  """
1833 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1834 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1835 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1836 c53279cf Guido Trotter
  REQ_BGL = False
1837 c53279cf Guido Trotter
1838 3994f455 Iustin Pop
  def CheckArguments(self):
1839 4b7735f9 Iustin Pop
    """Check parameters
1840 4b7735f9 Iustin Pop

1841 4b7735f9 Iustin Pop
    """
1842 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
1843 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
1844 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1845 4b7735f9 Iustin Pop
      try:
1846 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1847 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
1848 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1849 4b7735f9 Iustin Pop
                                   str(err))
1850 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
1851 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed")
1852 4b7735f9 Iustin Pop
1853 c53279cf Guido Trotter
  def ExpandNames(self):
1854 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
1855 c53279cf Guido Trotter
    # all nodes to be modified.
1856 c53279cf Guido Trotter
    self.needed_locks = {
1857 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1858 c53279cf Guido Trotter
    }
1859 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1860 8084f9f6 Manuel Franceschini
1861 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1862 8084f9f6 Manuel Franceschini
    """Build hooks env.
1863 8084f9f6 Manuel Franceschini

1864 8084f9f6 Manuel Franceschini
    """
1865 8084f9f6 Manuel Franceschini
    env = {
1866 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1867 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1868 8084f9f6 Manuel Franceschini
      }
1869 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1870 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1871 8084f9f6 Manuel Franceschini
1872 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1873 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1874 8084f9f6 Manuel Franceschini

1875 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1876 5f83e263 Iustin Pop
    if the given volume group is valid.
1877 8084f9f6 Manuel Franceschini

1878 8084f9f6 Manuel Franceschini
    """
1879 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
1880 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1881 8084f9f6 Manuel Franceschini
      for inst in instances:
1882 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1883 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1884 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1885 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1886 8084f9f6 Manuel Franceschini
1887 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1888 779c15bb Iustin Pop
1889 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1890 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1891 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
1892 8084f9f6 Manuel Franceschini
      for node in node_list:
1893 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
1894 e480923b Iustin Pop
        if msg:
1895 781de953 Iustin Pop
          # ignoring down node
1896 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
1897 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
1898 781de953 Iustin Pop
          continue
1899 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
1900 781de953 Iustin Pop
                                              self.op.vg_name,
1901 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1902 8084f9f6 Manuel Franceschini
        if vgstatus:
1903 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1904 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1905 8084f9f6 Manuel Franceschini
1906 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
1907 5af3da74 Guido Trotter
    # validate params changes
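    # the new parameter dictionaries are built by overlaying the submitted
    # values on the current cluster defaults, so keys that are not passed in
    # keep their present values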
1908 779c15bb Iustin Pop
    if self.op.beparams:
1909 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
1910 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
1911 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
1912 779c15bb Iustin Pop
1913 5af3da74 Guido Trotter
    if self.op.nicparams:
1914 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
1915 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
1916 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
1917 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
1918 5af3da74 Guido Trotter
1919 779c15bb Iustin Pop
    # hypervisor list/parameters
1920 abe609b2 Guido Trotter
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
1921 779c15bb Iustin Pop
    if self.op.hvparams:
1922 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
1923 779c15bb Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1924 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
1925 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
1926 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
1927 779c15bb Iustin Pop
        else:
1928 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
1929 779c15bb Iustin Pop
1930 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1931 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
1932 b119bccb Guido Trotter
      if not self.hv_list:
1933 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
1934 b119bccb Guido Trotter
                                   " least one member")
1935 b119bccb Guido Trotter
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
1936 b119bccb Guido Trotter
      if invalid_hvs:
1937 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
1938 6915bc28 Guido Trotter
                                   " entries: %s" % ", ".join(invalid_hvs))
1939 779c15bb Iustin Pop
    else:
1940 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
1941 779c15bb Iustin Pop
1942 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1943 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
1944 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
1945 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1946 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
1947 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
1948 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
1949 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
1950 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1951 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
1952 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
1953 779c15bb Iustin Pop
1954 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1955 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1956 8084f9f6 Manuel Franceschini

1957 8084f9f6 Manuel Franceschini
    """
1958 779c15bb Iustin Pop
    if self.op.vg_name is not None:
1959 b2482333 Guido Trotter
      new_volume = self.op.vg_name
1960 b2482333 Guido Trotter
      if not new_volume:
1961 b2482333 Guido Trotter
        new_volume = None
1962 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
1963 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
1964 779c15bb Iustin Pop
      else:
1965 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
1966 779c15bb Iustin Pop
                    " state, not changing")
1967 779c15bb Iustin Pop
    if self.op.hvparams:
1968 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
1969 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1970 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1971 779c15bb Iustin Pop
    if self.op.beparams:
1972 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
1973 5af3da74 Guido Trotter
    if self.op.nicparams:
1974 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
1975 5af3da74 Guido Trotter
1976 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1977 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1978 75e914fb Iustin Pop
      # we need to update the pool size here, otherwise the save will fail
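      # (_AdjustCandidatePool promotes normal nodes to master candidates as
      # needed, so that the pool is filled up to the new size)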
1979 44485f49 Guido Trotter
      _AdjustCandidatePool(self, [])
1980 4b7735f9 Iustin Pop
1981 779c15bb Iustin Pop
    self.cfg.Update(self.cluster)
1982 8084f9f6 Manuel Franceschini
1983 8084f9f6 Manuel Franceschini
1984 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
1985 28eddce5 Guido Trotter
  """Distribute additional files which are part of the cluster configuration.
1986 28eddce5 Guido Trotter

1987 28eddce5 Guido Trotter
  ConfigWriter takes care of distributing the config and ssconf files, but
1988 28eddce5 Guido Trotter
  there are more files which should be distributed to all nodes. This function
1989 28eddce5 Guido Trotter
  makes sure those are copied.
1990 28eddce5 Guido Trotter

1991 28eddce5 Guido Trotter
  @param lu: calling logical unit
1992 28eddce5 Guido Trotter
  @param additional_nodes: list of nodes not in the config to distribute to
1993 28eddce5 Guido Trotter

1994 28eddce5 Guido Trotter
  """
1995 28eddce5 Guido Trotter
  # 1. Gather target nodes
1996 28eddce5 Guido Trotter
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
1997 28eddce5 Guido Trotter
  dist_nodes = lu.cfg.GetNodeList()
1998 28eddce5 Guido Trotter
  if additional_nodes is not None:
1999 28eddce5 Guido Trotter
    dist_nodes.extend(additional_nodes)
2000 28eddce5 Guido Trotter
  if myself.name in dist_nodes:
2001 28eddce5 Guido Trotter
    dist_nodes.remove(myself.name)
2002 28eddce5 Guido Trotter
  # 2. Gather files to distribute
2003 28eddce5 Guido Trotter
  dist_files = set([constants.ETC_HOSTS,
2004 28eddce5 Guido Trotter
                    constants.SSH_KNOWN_HOSTS_FILE,
2005 28eddce5 Guido Trotter
                    constants.RAPI_CERT_FILE,
2006 28eddce5 Guido Trotter
                    constants.RAPI_USERS_FILE,
2007 4a34c5cf Guido Trotter
                    constants.HMAC_CLUSTER_KEY,
2008 28eddce5 Guido Trotter
                   ])
2009 e1b8653f Guido Trotter
2010 e1b8653f Guido Trotter
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
2011 e1b8653f Guido Trotter
  for hv_name in enabled_hypervisors:
2012 e1b8653f Guido Trotter
    hv_class = hypervisor.GetHypervisor(hv_name)
2013 e1b8653f Guido Trotter
    dist_files.update(hv_class.GetAncillaryFiles())
2014 e1b8653f Guido Trotter
2015 28eddce5 Guido Trotter
  # 3. Perform the files upload
2016 28eddce5 Guido Trotter
  for fname in dist_files:
2017 28eddce5 Guido Trotter
    if os.path.exists(fname):
2018 28eddce5 Guido Trotter
      result = lu.rpc.call_upload_file(dist_nodes, fname)
2019 28eddce5 Guido Trotter
      for to_node, to_result in result.items():
2020 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2021 6f7d4e75 Iustin Pop
        if msg:
2022 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2023 6f7d4e75 Iustin Pop
                 (fname, to_node, msg))
2024 6f7d4e75 Iustin Pop
          lu.proc.LogWarning(msg)
2025 28eddce5 Guido Trotter
2026 28eddce5 Guido Trotter
2027 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
2028 afee0879 Iustin Pop
  """Force the redistribution of cluster configuration.
2029 afee0879 Iustin Pop

2030 afee0879 Iustin Pop
  This is a very simple LU.
2031 afee0879 Iustin Pop

2032 afee0879 Iustin Pop
  """
2033 afee0879 Iustin Pop
  _OP_REQP = []
2034 afee0879 Iustin Pop
  REQ_BGL = False
2035 afee0879 Iustin Pop
2036 afee0879 Iustin Pop
  def ExpandNames(self):
2037 afee0879 Iustin Pop
    self.needed_locks = {
2038 afee0879 Iustin Pop
      locking.LEVEL_NODE: locking.ALL_SET,
2039 afee0879 Iustin Pop
    }
2040 afee0879 Iustin Pop
    self.share_locks[locking.LEVEL_NODE] = 1
2041 afee0879 Iustin Pop
2042 afee0879 Iustin Pop
  def CheckPrereq(self):
2043 afee0879 Iustin Pop
    """Check prerequisites.
2044 afee0879 Iustin Pop

2045 afee0879 Iustin Pop
    """
2046 afee0879 Iustin Pop
2047 afee0879 Iustin Pop
  def Exec(self, feedback_fn):
2048 afee0879 Iustin Pop
    """Redistribute the configuration.
2049 afee0879 Iustin Pop

2050 afee0879 Iustin Pop
    """
2051 afee0879 Iustin Pop
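    # re-saving the (unchanged) cluster object makes ConfigWriter push the
    # configuration and ssconf files to all nodes; the remaining ancillary
    # files are distributed by the helper below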
    self.cfg.Update(self.cfg.GetClusterInfo())
2052 28eddce5 Guido Trotter
    _RedistributeAncillaryFiles(self)
2053 afee0879 Iustin Pop
2054 afee0879 Iustin Pop
2055 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
2056 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
2057 a8083063 Iustin Pop

2058 a8083063 Iustin Pop
  """
2059 a8083063 Iustin Pop
  if not instance.disks:
2060 a8083063 Iustin Pop
    return True
2061 a8083063 Iustin Pop
2062 a8083063 Iustin Pop
  if not oneshot:
2063 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
2064 a8083063 Iustin Pop
2065 a8083063 Iustin Pop
  node = instance.primary_node
2066 a8083063 Iustin Pop
2067 a8083063 Iustin Pop
  for dev in instance.disks:
2068 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
2069 a8083063 Iustin Pop
2070 a8083063 Iustin Pop
  retries = 0
2071 fbafd7a8 Iustin Pop
  degr_retries = 10 # in seconds, as we sleep 1 second each time
2072 a8083063 Iustin Pop
  while True:
2073 a8083063 Iustin Pop
    max_time = 0
2074 a8083063 Iustin Pop
    done = True
2075 a8083063 Iustin Pop
    cumul_degraded = False
2076 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
2077 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2078 3efa9051 Iustin Pop
    if msg:
2079 3efa9051 Iustin Pop
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
2080 a8083063 Iustin Pop
      retries += 1
2081 a8083063 Iustin Pop
      if retries >= 10:
2082 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
2083 3ecf6786 Iustin Pop
                                 " aborting." % node)
2084 a8083063 Iustin Pop
      time.sleep(6)
2085 a8083063 Iustin Pop
      continue
2086 3efa9051 Iustin Pop
    rstats = rstats.payload
2087 a8083063 Iustin Pop
    retries = 0
2088 1492cca7 Iustin Pop
    for i, mstat in enumerate(rstats):
2089 a8083063 Iustin Pop
      if mstat is None:
2090 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
2091 86d9d3bb Iustin Pop
                           node, instance.disks[i].iv_name)
2092 a8083063 Iustin Pop
        continue
2093 36145b12 Michael Hanselmann
2094 36145b12 Michael Hanselmann
      cumul_degraded = (cumul_degraded or
2095 36145b12 Michael Hanselmann
                        (mstat.is_degraded and mstat.sync_percent is None))
2096 36145b12 Michael Hanselmann
      if mstat.sync_percent is not None:
2097 a8083063 Iustin Pop
        done = False
2098 36145b12 Michael Hanselmann
        if mstat.estimated_time is not None:
2099 36145b12 Michael Hanselmann
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
2100 36145b12 Michael Hanselmann
          max_time = mstat.estimated_time
2101 a8083063 Iustin Pop
        else:
2102 a8083063 Iustin Pop
          rem_time = "no time estimate"
2103 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
2104 4d4a651d Michael Hanselmann
                        (instance.disks[i].iv_name, mstat.sync_percent,
2105 4d4a651d Michael Hanselmann
                         rem_time))
2106 fbafd7a8 Iustin Pop
2107 fbafd7a8 Iustin Pop
    # if we're done but degraded, let's do a few small retries, to
2108 fbafd7a8 Iustin Pop
    # make sure we see a stable and not transient situation; therefore
2109 fbafd7a8 Iustin Pop
    # we force restart of the loop
2110 fbafd7a8 Iustin Pop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
2111 fbafd7a8 Iustin Pop
      logging.info("Degraded disks found, %d retries left", degr_retries)
2112 fbafd7a8 Iustin Pop
      degr_retries -= 1
2113 fbafd7a8 Iustin Pop
      time.sleep(1)
2114 fbafd7a8 Iustin Pop
      continue
2115 fbafd7a8 Iustin Pop
2116 a8083063 Iustin Pop
    if done or oneshot:
2117 a8083063 Iustin Pop
      break
2118 a8083063 Iustin Pop
2119 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
2120 a8083063 Iustin Pop
2121 a8083063 Iustin Pop
  if done:
2122 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
2123 a8083063 Iustin Pop
  return not cumul_degraded
2124 a8083063 Iustin Pop
2125 a8083063 Iustin Pop
2126 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
2127 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
2128 a8083063 Iustin Pop

2129 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
2130 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
2131 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
2132 0834c866 Iustin Pop

2133 a8083063 Iustin Pop
  """
2134 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
2135 a8083063 Iustin Pop
2136 a8083063 Iustin Pop
  result = True
2137 96acbc09 Michael Hanselmann
2138 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
2139 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
2140 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2141 23829f6f Iustin Pop
    if msg:
2142 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
2143 23829f6f Iustin Pop
      result = False
2144 23829f6f Iustin Pop
    elif not rstats.payload:
2145 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
2146 a8083063 Iustin Pop
      result = False
2147 a8083063 Iustin Pop
    else:
2148 96acbc09 Michael Hanselmann
      if ldisk:
2149 f208978a Michael Hanselmann
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
2150 96acbc09 Michael Hanselmann
      else:
2151 96acbc09 Michael Hanselmann
        result = result and not rstats.payload.is_degraded
2152 96acbc09 Michael Hanselmann
2153 a8083063 Iustin Pop
  if dev.children:
2154 a8083063 Iustin Pop
    for child in dev.children:
2155 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
2156 a8083063 Iustin Pop
2157 a8083063 Iustin Pop
  return result
2158 a8083063 Iustin Pop
2159 a8083063 Iustin Pop
2160 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
2161 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
2162 a8083063 Iustin Pop

2163 a8083063 Iustin Pop
  """
2164 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2165 6bf01bbb Guido Trotter
  REQ_BGL = False
2166 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
2167 1e288a26 Guido Trotter
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
2168 1e288a26 Guido Trotter
  # Fields that need calculation of global os validity
2169 1e288a26 Guido Trotter
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])
2170 a8083063 Iustin Pop
2171 6bf01bbb Guido Trotter
  def ExpandNames(self):
2172 1f9430d6 Iustin Pop
    if self.op.names:
2173 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
2174 1f9430d6 Iustin Pop
2175 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2176 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2177 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
2178 1f9430d6 Iustin Pop
2179 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
2180 a6ab004b Iustin Pop
    # Temporary removal of locks, should be reverted later
2181 a6ab004b Iustin Pop
    # TODO: reintroduce locks when they are lighter-weight
2182 6bf01bbb Guido Trotter
    self.needed_locks = {}
2183 a6ab004b Iustin Pop
    #self.share_locks[locking.LEVEL_NODE] = 1
2184 a6ab004b Iustin Pop
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2185 6bf01bbb Guido Trotter
2186 6bf01bbb Guido Trotter
  def CheckPrereq(self):
2187 6bf01bbb Guido Trotter
    """Check prerequisites.
2188 6bf01bbb Guido Trotter

2189 6bf01bbb Guido Trotter
    """
2190 6bf01bbb Guido Trotter
2191 1f9430d6 Iustin Pop
  @staticmethod
2192 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
2193 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
2194 1f9430d6 Iustin Pop

2195 e4376078 Iustin Pop
    @param node_list: a list with the names of all nodes
2196 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
2197 1f9430d6 Iustin Pop

2198 e4376078 Iustin Pop
    @rtype: dict
2199 5fcc718f Iustin Pop
    @return: a dictionary with osnames as keys and as value another map, with
2200 255dcebd Iustin Pop
        nodes as keys and tuples of (path, status, diagnose, variants)
        as values, eg::
2201 e4376078 Iustin Pop

2202 255dcebd Iustin Pop
          {"debian-etch": {"node1": [(/usr/lib/..., True, "", []),
2203 255dcebd Iustin Pop
                                     (/srv/..., False, "invalid api", [])],
2204 255dcebd Iustin Pop
                           "node2": [(/srv/..., True, "", [])]}
2205 e4376078 Iustin Pop
          }
2206 1f9430d6 Iustin Pop

2207 1f9430d6 Iustin Pop
    """
2208 1f9430d6 Iustin Pop
    all_os = {}
2209 a6ab004b Iustin Pop
    # we build here the list of nodes that didn't fail the RPC (at RPC
2210 a6ab004b Iustin Pop
    # level), so that nodes with a non-responding node daemon don't
2211 a6ab004b Iustin Pop
    # make all OSes invalid
2212 a6ab004b Iustin Pop
    good_nodes = [node_name for node_name in rlist
2213 4c4e4e1e Iustin Pop
                  if not rlist[node_name].fail_msg]
2214 83d92ad8 Iustin Pop
    for node_name, nr in rlist.items():
2215 4c4e4e1e Iustin Pop
      if nr.fail_msg or not nr.payload:
2216 1f9430d6 Iustin Pop
        continue
2217 ba00557a Guido Trotter
      for name, path, status, diagnose, variants in nr.payload:
2218 255dcebd Iustin Pop
        if name not in all_os:
2219 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
2220 1f9430d6 Iustin Pop
          # for each node in node_list
2221 255dcebd Iustin Pop
          all_os[name] = {}
2222 a6ab004b Iustin Pop
          for nname in good_nodes:
2223 255dcebd Iustin Pop
            all_os[name][nname] = []
2224 ba00557a Guido Trotter
        all_os[name][node_name].append((path, status, diagnose, variants))
2225 1f9430d6 Iustin Pop
    return all_os
2226 a8083063 Iustin Pop
2227 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2228 a8083063 Iustin Pop
    """Compute the list of OSes.
2229 a8083063 Iustin Pop

2230 a8083063 Iustin Pop
    """
2231 a6ab004b Iustin Pop
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
2232 94a02bb5 Iustin Pop
    node_data = self.rpc.call_os_diagnose(valid_nodes)
2233 94a02bb5 Iustin Pop
    pol = self._DiagnoseByOS(valid_nodes, node_data)
2234 1f9430d6 Iustin Pop
    output = []
2235 1e288a26 Guido Trotter
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
2236 1e288a26 Guido Trotter
    calc_variants = "variants" in self.op.output_fields
2237 1e288a26 Guido Trotter
2238 83d92ad8 Iustin Pop
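    # an OS is reported as valid only if it is valid on every node returning
    # it, and its variants are the intersection of the per-node variant lists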
    for os_name, os_data in pol.items():
2239 1f9430d6 Iustin Pop
      row = []
2240 1e288a26 Guido Trotter
      if calc_valid:
2241 1e288a26 Guido Trotter
        valid = True
2242 1e288a26 Guido Trotter
        variants = None
2243 1e288a26 Guido Trotter
        for osl in os_data.values():
2244 1e288a26 Guido Trotter
          valid = valid and osl and osl[0][1]
2245 1e288a26 Guido Trotter
          if not valid:
2246 1e288a26 Guido Trotter
            variants = None
2247 1e288a26 Guido Trotter
            break
2248 1e288a26 Guido Trotter
          if calc_variants:
2249 1e288a26 Guido Trotter
            node_variants = osl[0][3]
2250 1e288a26 Guido Trotter
            if variants is None:
2251 1e288a26 Guido Trotter
              variants = node_variants
2252 1e288a26 Guido Trotter
            else:
2253 1e288a26 Guido Trotter
              variants = [v for v in variants if v in node_variants]
2254 1e288a26 Guido Trotter
2255 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
2256 1f9430d6 Iustin Pop
        if field == "name":
2257 1f9430d6 Iustin Pop
          val = os_name
2258 1f9430d6 Iustin Pop
        elif field == "valid":
2259 1e288a26 Guido Trotter
          val = valid
2260 1f9430d6 Iustin Pop
        elif field == "node_status":
2261 255dcebd Iustin Pop
          # this is just a copy of the dict
2262 1f9430d6 Iustin Pop
          val = {}
2263 255dcebd Iustin Pop
          for node_name, nos_list in os_data.items():
2264 255dcebd Iustin Pop
            val[node_name] = nos_list
2265 1e288a26 Guido Trotter
        elif field == "variants":
2266 1e288a26 Guido Trotter
          val = variants
2267 1f9430d6 Iustin Pop
        else:
2268 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
2269 1f9430d6 Iustin Pop
        row.append(val)
2270 1f9430d6 Iustin Pop
      output.append(row)
2271 1f9430d6 Iustin Pop
2272 1f9430d6 Iustin Pop
    return output
2273 a8083063 Iustin Pop
2274 a8083063 Iustin Pop
2275 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
2276 a8083063 Iustin Pop
  """Logical unit for removing a node.
2277 a8083063 Iustin Pop

2278 a8083063 Iustin Pop
  """
2279 a8083063 Iustin Pop
  HPATH = "node-remove"
2280 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2281 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2282 a8083063 Iustin Pop
2283 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2284 a8083063 Iustin Pop
    """Build hooks env.
2285 a8083063 Iustin Pop

2286 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
2287 d08869ee Guido Trotter
    node would then be impossible to remove.
2288 a8083063 Iustin Pop

2289 a8083063 Iustin Pop
    """
2290 396e1b78 Michael Hanselmann
    env = {
2291 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2292 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
2293 396e1b78 Michael Hanselmann
      }
2294 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2295 cd46f3b4 Luca Bigliardi
    if self.op.node_name in all_nodes:
2296 cd46f3b4 Luca Bigliardi
      all_nodes.remove(self.op.node_name)
2297 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
2298 a8083063 Iustin Pop
2299 a8083063 Iustin Pop
  def CheckPrereq(self):
2300 a8083063 Iustin Pop
    """Check prerequisites.
2301 a8083063 Iustin Pop

2302 a8083063 Iustin Pop
    This checks:
2303 a8083063 Iustin Pop
     - the node exists in the configuration
2304 a8083063 Iustin Pop
     - it does not have primary or secondary instances
2305 a8083063 Iustin Pop
     - it's not the master
2306 a8083063 Iustin Pop

2307 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2308 a8083063 Iustin Pop

2309 a8083063 Iustin Pop
    """
2310 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
2311 a8083063 Iustin Pop
    if node is None:
2312 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
2313 a8083063 Iustin Pop
2314 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2315 a8083063 Iustin Pop
2316 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
2317 a8083063 Iustin Pop
    if node.name == masternode:
2318 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
2319 3ecf6786 Iustin Pop
                                 " you need to failover first.")
2320 a8083063 Iustin Pop
2321 a8083063 Iustin Pop
    for instance_name in instance_list:
2322 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
2323 6b12959c Iustin Pop
      if node.name in instance.all_nodes:
2324 6b12959c Iustin Pop
        raise errors.OpPrereqError("Instance %s is still running on the node,"
2325 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
2326 a8083063 Iustin Pop
    self.op.node_name = node.name
2327 a8083063 Iustin Pop
    self.node = node
2328 a8083063 Iustin Pop
2329 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2330 a8083063 Iustin Pop
    """Removes the node from the cluster.
2331 a8083063 Iustin Pop

2332 a8083063 Iustin Pop
    """
2333 a8083063 Iustin Pop
    node = self.node
2334 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
2335 9a4f63d1 Iustin Pop
                 node.name)
2336 a8083063 Iustin Pop
2337 44485f49 Guido Trotter
    # Promote nodes to master candidate as needed
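    # (the node being removed is passed as an exception so that it is not
    # taken into account when refilling the candidate pool)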
2338 44485f49 Guido Trotter
    _AdjustCandidatePool(self, exceptions=[node.name])
2339 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
2340 a8083063 Iustin Pop
2341 cd46f3b4 Luca Bigliardi
    # Run post hooks on the node before it's removed
2342 cd46f3b4 Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
2343 cd46f3b4 Luca Bigliardi
    try:
2344 cd46f3b4 Luca Bigliardi
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
2345 3cb5c1e3 Luca Bigliardi
    except:
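      # errors from the post hooks are only logged: they should not prevent
      # the node removal from completing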
2346 3cb5c1e3 Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % node.name)
2347 cd46f3b4 Luca Bigliardi
2348 0623d351 Iustin Pop
    result = self.rpc.call_node_leave_cluster(node.name)
2349 4c4e4e1e Iustin Pop
    msg = result.fail_msg
2350 0623d351 Iustin Pop
    if msg:
2351 0623d351 Iustin Pop
      self.LogWarning("Errors encountered on the remote node while leaving"
2352 0623d351 Iustin Pop
                      " the cluster: %s", msg)
2353 c8a0948f Michael Hanselmann
2354 a8083063 Iustin Pop
2355 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
2356 a8083063 Iustin Pop
  """Logical unit for querying nodes.
2357 a8083063 Iustin Pop

2358 a8083063 Iustin Pop
  """
2359 bc8e4a1a Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
2360 35705d8f Guido Trotter
  REQ_BGL = False
2361 19bed813 Iustin Pop
2362 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
2363 19bed813 Iustin Pop
                    "master_candidate", "offline", "drained"]
2364 19bed813 Iustin Pop
2365 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
2366 31bf511f Iustin Pop
    "dtotal", "dfree",
2367 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
2368 31bf511f Iustin Pop
    "bootid",
2369 0105bad3 Iustin Pop
    "ctotal", "cnodes", "csockets",
2370 31bf511f Iustin Pop
    )
2371 31bf511f Iustin Pop
2372 19bed813 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*[
2373 19bed813 Iustin Pop
    "pinst_cnt", "sinst_cnt",
2374 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
2375 31bf511f Iustin Pop
    "pip", "sip", "tags",
2376 0e67cdbe Iustin Pop
    "master",
2377 19bed813 Iustin Pop
    "role"] + _SIMPLE_FIELDS
2378 31bf511f Iustin Pop
    )
2379 a8083063 Iustin Pop
2380 35705d8f Guido Trotter
  def ExpandNames(self):
2381 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2382 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2383 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2384 a8083063 Iustin Pop
2385 35705d8f Guido Trotter
    self.needed_locks = {}
2386 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2387 c8d8b4c8 Iustin Pop
2388 c8d8b4c8 Iustin Pop
    if self.op.names:
2389 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
2390 35705d8f Guido Trotter
    else:
2391 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
2392 c8d8b4c8 Iustin Pop
2393 bc8e4a1a Iustin Pop
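    # live (dynamic) data requires RPC calls to the nodes, so node locks are
    # only taken when such fields are requested and locking was asked for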
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2394 bc8e4a1a Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
2395 c8d8b4c8 Iustin Pop
    if self.do_locking:
2396 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
2397 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
2398 c8d8b4c8 Iustin Pop
2399 35705d8f Guido Trotter
  def CheckPrereq(self):
2400 35705d8f Guido Trotter
    """Check prerequisites.
2401 35705d8f Guido Trotter

2402 35705d8f Guido Trotter
    """
2403 c8d8b4c8 Iustin Pop
    # The node list is validated in _GetWantedNodes when it is not empty;
2404 c8d8b4c8 Iustin Pop
    # when it is empty, there is nothing to validate
2405 c8d8b4c8 Iustin Pop
    pass
2406 a8083063 Iustin Pop
2407 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2408 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2409 a8083063 Iustin Pop

2410 a8083063 Iustin Pop
    """
2411 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
2412 c8d8b4c8 Iustin Pop
    if self.do_locking:
2413 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
2414 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2415 3fa93523 Guido Trotter
      nodenames = self.wanted
2416 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
2417 3fa93523 Guido Trotter
      if missing:
2418 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2419 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
2420 c8d8b4c8 Iustin Pop
    else:
2421 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
2422 c1f1cbb2 Iustin Pop
2423 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
2424 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
2425 a8083063 Iustin Pop
2426 a8083063 Iustin Pop
    # begin data gathering
2427 a8083063 Iustin Pop
2428 bc8e4a1a Iustin Pop
    if self.do_node_query:
2429 a8083063 Iustin Pop
      live_data = {}
2430 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2431 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
2432 a8083063 Iustin Pop
      for name in nodenames:
2433 781de953 Iustin Pop
        nodeinfo = node_data[name]
2434 4c4e4e1e Iustin Pop
        if not nodeinfo.fail_msg and nodeinfo.payload:
2435 070e998b Iustin Pop
          nodeinfo = nodeinfo.payload
2436 d599d686 Iustin Pop
          fn = utils.TryConvert
2437 a8083063 Iustin Pop
          live_data[name] = {
2438 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2439 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2440 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2441 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2442 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2443 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2444 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
2445 0105bad3 Iustin Pop
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2446 0105bad3 Iustin Pop
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2447 a8083063 Iustin Pop
            }
2448 a8083063 Iustin Pop
        else:
2449 a8083063 Iustin Pop
          live_data[name] = {}
2450 a8083063 Iustin Pop
    else:
2451 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
2452 a8083063 Iustin Pop
2453 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
2454 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
2455 a8083063 Iustin Pop
2456 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2457 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
2458 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
2459 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
2460 a8083063 Iustin Pop
2461 ec223efb Iustin Pop
      for instance_name in instancelist:
2462 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
2463 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
2464 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
2465 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
2466 ec223efb Iustin Pop
          if secnode in node_to_secondary:
2467 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
2468 a8083063 Iustin Pop
2469 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
2470 0e67cdbe Iustin Pop
2471 a8083063 Iustin Pop
    # end data gathering
2472 a8083063 Iustin Pop
2473 a8083063 Iustin Pop
    output = []
2474 a8083063 Iustin Pop
    for node in nodelist:
2475 a8083063 Iustin Pop
      node_output = []
2476 a8083063 Iustin Pop
      for field in self.op.output_fields:
2477 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
2478 19bed813 Iustin Pop
          val = getattr(node, field)
2479 ec223efb Iustin Pop
        elif field == "pinst_list":
2480 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
2481 ec223efb Iustin Pop
        elif field == "sinst_list":
2482 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
2483 ec223efb Iustin Pop
        elif field == "pinst_cnt":
2484 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
2485 ec223efb Iustin Pop
        elif field == "sinst_cnt":
2486 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
2487 a8083063 Iustin Pop
        elif field == "pip":
2488 a8083063 Iustin Pop
          val = node.primary_ip
2489 a8083063 Iustin Pop
        elif field == "sip":
2490 a8083063 Iustin Pop
          val = node.secondary_ip
2491 130a6a6f Iustin Pop
        elif field == "tags":
2492 130a6a6f Iustin Pop
          val = list(node.GetTags())
2493 0e67cdbe Iustin Pop
        elif field == "master":
2494 0e67cdbe Iustin Pop
          val = node.name == master_node
2495 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
2496 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
2497 c120ff34 Iustin Pop
        elif field == "role":
2498 c120ff34 Iustin Pop
          if node.name == master_node:
2499 c120ff34 Iustin Pop
            val = "M"
2500 c120ff34 Iustin Pop
          elif node.master_candidate:
2501 c120ff34 Iustin Pop
            val = "C"
2502 c120ff34 Iustin Pop
          elif node.drained:
2503 c120ff34 Iustin Pop
            val = "D"
2504 c120ff34 Iustin Pop
          elif node.offline:
2505 c120ff34 Iustin Pop
            val = "O"
2506 c120ff34 Iustin Pop
          else:
2507 c120ff34 Iustin Pop
            val = "R"
2508 a8083063 Iustin Pop
        else:
2509 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2510 a8083063 Iustin Pop
        node_output.append(val)
2511 a8083063 Iustin Pop
      output.append(node_output)
2512 a8083063 Iustin Pop
2513 a8083063 Iustin Pop
    return output
2514 a8083063 Iustin Pop
2515 a8083063 Iustin Pop
2516 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
2517 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
2518 dcb93971 Michael Hanselmann

2519 dcb93971 Michael Hanselmann
  """
2520 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
2521 21a15682 Guido Trotter
  REQ_BGL = False
2522 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
2523 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
2524 21a15682 Guido Trotter
2525 21a15682 Guido Trotter
  def ExpandNames(self):
2526 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2527 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2528 21a15682 Guido Trotter
                       selected=self.op.output_fields)
2529 21a15682 Guido Trotter
2530 21a15682 Guido Trotter
    self.needed_locks = {}
2531 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2532 21a15682 Guido Trotter
    if not self.op.nodes:
2533 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2534 21a15682 Guido Trotter
    else:
2535 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
2536 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
2537 dcb93971 Michael Hanselmann
2538 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
2539 dcb93971 Michael Hanselmann
    """Check prerequisites.
2540 dcb93971 Michael Hanselmann

2541 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
2542 dcb93971 Michael Hanselmann

2543 dcb93971 Michael Hanselmann
    """
2544 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2545 dcb93971 Michael Hanselmann
2546 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
2547 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
2548 dcb93971 Michael Hanselmann

2549 dcb93971 Michael Hanselmann
    """
2550 a7ba5e53 Iustin Pop
    nodenames = self.nodes
2551 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
2552 dcb93971 Michael Hanselmann
2553 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
2554 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
2555 dcb93971 Michael Hanselmann
2556 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
2557 dcb93971 Michael Hanselmann
2558 dcb93971 Michael Hanselmann
    output = []
2559 dcb93971 Michael Hanselmann
    for node in nodenames:
2560 10bfe6cb Iustin Pop
      nresult = volumes[node]
2561 10bfe6cb Iustin Pop
      if nresult.offline:
2562 10bfe6cb Iustin Pop
        continue
2563 4c4e4e1e Iustin Pop
      msg = nresult.fail_msg
2564 10bfe6cb Iustin Pop
      if msg:
2565 10bfe6cb Iustin Pop
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
2566 37d19eb2 Michael Hanselmann
        continue
2567 37d19eb2 Michael Hanselmann
2568 10bfe6cb Iustin Pop
      node_vols = nresult.payload[:]
2569 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
2570 dcb93971 Michael Hanselmann
2571 dcb93971 Michael Hanselmann
      for vol in node_vols:
2572 dcb93971 Michael Hanselmann
        node_output = []
2573 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
2574 dcb93971 Michael Hanselmann
          if field == "node":
2575 dcb93971 Michael Hanselmann
            val = node
2576 dcb93971 Michael Hanselmann
          elif field == "phys":
2577 dcb93971 Michael Hanselmann
            val = vol['dev']
2578 dcb93971 Michael Hanselmann
          elif field == "vg":
2579 dcb93971 Michael Hanselmann
            val = vol['vg']
2580 dcb93971 Michael Hanselmann
          elif field == "name":
2581 dcb93971 Michael Hanselmann
            val = vol['name']
2582 dcb93971 Michael Hanselmann
          elif field == "size":
2583 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
2584 dcb93971 Michael Hanselmann
          elif field == "instance":
2585 dcb93971 Michael Hanselmann
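            # find the instance owning this LV on this node; the for/else
            # falls through to '-' when no instance maps the volume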
            for inst in ilist:
2586 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
2587 dcb93971 Michael Hanselmann
                continue
2588 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
2589 dcb93971 Michael Hanselmann
                val = inst.name
2590 dcb93971 Michael Hanselmann
                break
2591 dcb93971 Michael Hanselmann
            else:
2592 dcb93971 Michael Hanselmann
              val = '-'
2593 dcb93971 Michael Hanselmann
          else:
2594 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
2595 dcb93971 Michael Hanselmann
          node_output.append(str(val))
2596 dcb93971 Michael Hanselmann
2597 dcb93971 Michael Hanselmann
        output.append(node_output)
2598 dcb93971 Michael Hanselmann
2599 dcb93971 Michael Hanselmann
    return output
2600 dcb93971 Michael Hanselmann
2601 dcb93971 Michael Hanselmann
2602 9e5442ce Michael Hanselmann
class LUQueryNodeStorage(NoHooksLU):
2603 9e5442ce Michael Hanselmann
  """Logical unit for getting information on storage units on node(s).
2604 9e5442ce Michael Hanselmann

2605 9e5442ce Michael Hanselmann
  """
2606 9e5442ce Michael Hanselmann
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
2607 9e5442ce Michael Hanselmann
  REQ_BGL = False
2608 9e5442ce Michael Hanselmann
  _FIELDS_STATIC = utils.FieldSet("node")
2609 9e5442ce Michael Hanselmann
2610 9e5442ce Michael Hanselmann
  def ExpandNames(self):
2611 9e5442ce Michael Hanselmann
    storage_type = self.op.storage_type
2612 9e5442ce Michael Hanselmann
2613 9e5442ce Michael Hanselmann
    if storage_type not in constants.VALID_STORAGE_FIELDS:
2614 9e5442ce Michael Hanselmann
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type)
2615 9e5442ce Michael Hanselmann
2616 9e5442ce Michael Hanselmann
    dynamic_fields = constants.VALID_STORAGE_FIELDS[storage_type]
2617 9e5442ce Michael Hanselmann
2618 9e5442ce Michael Hanselmann
    _CheckOutputFields(static=self._FIELDS_STATIC,
2619 9e5442ce Michael Hanselmann
                       dynamic=utils.FieldSet(*dynamic_fields),
2620 9e5442ce Michael Hanselmann
                       selected=self.op.output_fields)
2621 9e5442ce Michael Hanselmann
2622 9e5442ce Michael Hanselmann
    self.needed_locks = {}
2623 9e5442ce Michael Hanselmann
    self.share_locks[locking.LEVEL_NODE] = 1
2624 9e5442ce Michael Hanselmann
2625 9e5442ce Michael Hanselmann
    if self.op.nodes:
2626 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = \
2627 9e5442ce Michael Hanselmann
        _GetWantedNodes(self, self.op.nodes)
2628 9e5442ce Michael Hanselmann
    else:
2629 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2630 9e5442ce Michael Hanselmann
2631 9e5442ce Michael Hanselmann
  def CheckPrereq(self):
2632 9e5442ce Michael Hanselmann
    """Check prerequisites.
2633 9e5442ce Michael Hanselmann

2634 9e5442ce Michael Hanselmann
    This checks that the fields required are valid output fields.
2635 9e5442ce Michael Hanselmann

2636 9e5442ce Michael Hanselmann
    """
2637 9e5442ce Michael Hanselmann
    self.op.name = getattr(self.op, "name", None)
2638 9e5442ce Michael Hanselmann
2639 9e5442ce Michael Hanselmann
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2640 9e5442ce Michael Hanselmann
2641 9e5442ce Michael Hanselmann
  def Exec(self, feedback_fn):
2642 9e5442ce Michael Hanselmann
    """Computes the list of nodes and their attributes.
2643 9e5442ce Michael Hanselmann

2644 9e5442ce Michael Hanselmann
    """
2645 9e5442ce Michael Hanselmann
    # Always get name to sort by
2646 9e5442ce Michael Hanselmann
    if constants.SF_NAME in self.op.output_fields:
2647 9e5442ce Michael Hanselmann
      fields = self.op.output_fields[:]
2648 9e5442ce Michael Hanselmann
    else:
2649 9e5442ce Michael Hanselmann
      fields = [constants.SF_NAME] + self.op.output_fields
2650 9e5442ce Michael Hanselmann
2651 9e5442ce Michael Hanselmann
    # Never ask for node as it's only known to the LU
2652 9e5442ce Michael Hanselmann
    while "node" in fields:
2653 9e5442ce Michael Hanselmann
      fields.remove("node")
2654 9e5442ce Michael Hanselmann
2655 9e5442ce Michael Hanselmann
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
2656 9e5442ce Michael Hanselmann
    name_idx = field_idx[constants.SF_NAME]
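    # the rows are keyed by the storage unit name so that the output can be
    # sorted by name even when the caller did not request that field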
2657 9e5442ce Michael Hanselmann
2658 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
2659 9e5442ce Michael Hanselmann
    data = self.rpc.call_storage_list(self.nodes,
2660 9e5442ce Michael Hanselmann
                                      self.op.storage_type, st_args,
2661 9e5442ce Michael Hanselmann
                                      self.op.name, fields)
2662 9e5442ce Michael Hanselmann
2663 9e5442ce Michael Hanselmann
    result = []
2664 9e5442ce Michael Hanselmann
2665 9e5442ce Michael Hanselmann
    for node in utils.NiceSort(self.nodes):
2666 9e5442ce Michael Hanselmann
      nresult = data[node]
2667 9e5442ce Michael Hanselmann
      if nresult.offline:
2668 9e5442ce Michael Hanselmann
        continue
2669 9e5442ce Michael Hanselmann
2670 9e5442ce Michael Hanselmann
      msg = nresult.fail_msg
2671 9e5442ce Michael Hanselmann
      if msg:
2672 9e5442ce Michael Hanselmann
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
2673 9e5442ce Michael Hanselmann
        continue
2674 9e5442ce Michael Hanselmann
2675 9e5442ce Michael Hanselmann
      rows = dict([(row[name_idx], row) for row in nresult.payload])
2676 9e5442ce Michael Hanselmann
2677 9e5442ce Michael Hanselmann
      for name in utils.NiceSort(rows.keys()):
2678 9e5442ce Michael Hanselmann
        row = rows[name]
2679 9e5442ce Michael Hanselmann
2680 9e5442ce Michael Hanselmann
        out = []
2681 9e5442ce Michael Hanselmann
2682 9e5442ce Michael Hanselmann
        for field in self.op.output_fields:
2683 9e5442ce Michael Hanselmann
          if field == "node":
2684 9e5442ce Michael Hanselmann
            val = node
2685 9e5442ce Michael Hanselmann
          elif field in field_idx:
2686 9e5442ce Michael Hanselmann
            val = row[field_idx[field]]
2687 9e5442ce Michael Hanselmann
          else:
2688 9e5442ce Michael Hanselmann
            raise errors.ParameterError(field)
2689 9e5442ce Michael Hanselmann
2690 9e5442ce Michael Hanselmann
          out.append(val)
2691 9e5442ce Michael Hanselmann
2692 9e5442ce Michael Hanselmann
        result.append(out)
2693 9e5442ce Michael Hanselmann
2694 9e5442ce Michael Hanselmann
    return result
2695 9e5442ce Michael Hanselmann
2696 9e5442ce Michael Hanselmann
2697 efb8da02 Michael Hanselmann
class LUModifyNodeStorage(NoHooksLU):
2698 efb8da02 Michael Hanselmann
  """Logical unit for modifying a storage volume on a node.
2699 efb8da02 Michael Hanselmann

2700 efb8da02 Michael Hanselmann
  """
2701 efb8da02 Michael Hanselmann
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
2702 efb8da02 Michael Hanselmann
  REQ_BGL = False
2703 efb8da02 Michael Hanselmann
2704 efb8da02 Michael Hanselmann
  def CheckArguments(self):
2705 efb8da02 Michael Hanselmann
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2706 efb8da02 Michael Hanselmann
    if node_name is None:
2707 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2708 efb8da02 Michael Hanselmann
2709 efb8da02 Michael Hanselmann
    self.op.node_name = node_name
2710 efb8da02 Michael Hanselmann
2711 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
2712 efb8da02 Michael Hanselmann
    if storage_type not in constants.VALID_STORAGE_FIELDS:
2713 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type)
2714 efb8da02 Michael Hanselmann
2715 efb8da02 Michael Hanselmann
  def ExpandNames(self):
2716 efb8da02 Michael Hanselmann
    self.needed_locks = {
2717 efb8da02 Michael Hanselmann
      locking.LEVEL_NODE: self.op.node_name,
2718 efb8da02 Michael Hanselmann
      }
2719 efb8da02 Michael Hanselmann
2720 efb8da02 Michael Hanselmann
  def CheckPrereq(self):
2721 efb8da02 Michael Hanselmann
    """Check prerequisites.
2722 efb8da02 Michael Hanselmann

2723 efb8da02 Michael Hanselmann
    """
2724 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
2725 efb8da02 Michael Hanselmann
2726 efb8da02 Michael Hanselmann
    try:
2727 efb8da02 Michael Hanselmann
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
2728 efb8da02 Michael Hanselmann
    except KeyError:
2729 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
2730 efb8da02 Michael Hanselmann
                                 " modified" % storage_type)
2731 efb8da02 Michael Hanselmann
2732 efb8da02 Michael Hanselmann
    diff = set(self.op.changes.keys()) - modifiable
2733 efb8da02 Michael Hanselmann
    if diff:
2734 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("The following fields can not be modified for"
2735 efb8da02 Michael Hanselmann
                                 " storage units of type '%s': %r" %
2736 efb8da02 Michael Hanselmann
                                 (storage_type, list(diff)))
2737 efb8da02 Michael Hanselmann
2738 efb8da02 Michael Hanselmann
  def Exec(self, feedback_fn):
2739 efb8da02 Michael Hanselmann
    """Computes the list of nodes and their attributes.
2740 efb8da02 Michael Hanselmann

2741 efb8da02 Michael Hanselmann
    """
2742 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
2743 efb8da02 Michael Hanselmann
    result = self.rpc.call_storage_modify(self.op.node_name,
2744 efb8da02 Michael Hanselmann
                                          self.op.storage_type, st_args,
2745 efb8da02 Michael Hanselmann
                                          self.op.name, self.op.changes)
2746 efb8da02 Michael Hanselmann
    result.Raise("Failed to modify storage unit '%s' on %s" %
2747 efb8da02 Michael Hanselmann
                 (self.op.name, self.op.node_name))
2748 efb8da02 Michael Hanselmann
2749 efb8da02 Michael Hanselmann
2750 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
2751 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
2752 a8083063 Iustin Pop

2753 a8083063 Iustin Pop
  """
2754 a8083063 Iustin Pop
  HPATH = "node-add"
2755 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2756 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2757 a8083063 Iustin Pop
2758 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2759 a8083063 Iustin Pop
    """Build hooks env.
2760 a8083063 Iustin Pop

2761 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
2762 a8083063 Iustin Pop

2763 a8083063 Iustin Pop
    """
2764 a8083063 Iustin Pop
    env = {
2765 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2766 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
2767 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
2768 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
2769 a8083063 Iustin Pop
      }
2770 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
2771 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
2772 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
2773 a8083063 Iustin Pop
2774 a8083063 Iustin Pop
  def CheckPrereq(self):
2775 a8083063 Iustin Pop
    """Check prerequisites.
2776 a8083063 Iustin Pop

2777 a8083063 Iustin Pop
    This checks:
2778 a8083063 Iustin Pop
     - the new node is not already in the config
2779 a8083063 Iustin Pop
     - it is resolvable
2780 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
2781 a8083063 Iustin Pop

2782 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2783 a8083063 Iustin Pop

2784 a8083063 Iustin Pop
    """
2785 a8083063 Iustin Pop
    node_name = self.op.node_name
2786 a8083063 Iustin Pop
    cfg = self.cfg
2787 a8083063 Iustin Pop
2788 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
2789 a8083063 Iustin Pop
2790 bcf043c9 Iustin Pop
    node = dns_data.name
2791 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
2792 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
2793 a8083063 Iustin Pop
    if secondary_ip is None:
2794 a8083063 Iustin Pop
      secondary_ip = primary_ip
2795 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
2796 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
2797 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
2798 e7c6e02b Michael Hanselmann
2799 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
2800 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
2801 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
2802 e7c6e02b Michael Hanselmann
                                 node)
2803 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
2804 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2805 a8083063 Iustin Pop
2806 a8083063 Iustin Pop
    for existing_node_name in node_list:
2807 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
2808 e7c6e02b Michael Hanselmann
2809 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
2810 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
2811 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
2812 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
2813 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
2814 e7c6e02b Michael Hanselmann
        continue
2815 e7c6e02b Michael Hanselmann
2816 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
2817 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
2818 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
2819 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
2820 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
2821 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
2822 a8083063 Iustin Pop
2823 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
2824 a8083063 Iustin Pop
    # same as for the master
2825 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2826 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
2827 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
2828 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
2829 a8083063 Iustin Pop
      if master_singlehomed:
2830 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
2831 3ecf6786 Iustin Pop
                                   " new node has one")
2832 a8083063 Iustin Pop
      else:
2833 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
2834 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
2835 a8083063 Iustin Pop
2836 5bbd3f7f Michael Hanselmann
    # checks reachability
2837 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2838 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
2839 a8083063 Iustin Pop
2840 a8083063 Iustin Pop
    if not newbie_singlehomed:
2841 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
2842 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2843 b15d625f Iustin Pop
                           source=myself.secondary_ip):
2844 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2845 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
2846 a8083063 Iustin Pop
2847 a8ae3eb5 Iustin Pop
    if self.op.readd:
2848 a8ae3eb5 Iustin Pop
      exceptions = [node]
2849 a8ae3eb5 Iustin Pop
    else:
2850 a8ae3eb5 Iustin Pop
      exceptions = []
2851 6d7e1f20 Guido Trotter
2852 6d7e1f20 Guido Trotter
    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
2853 0fff97e9 Guido Trotter
2854 a8ae3eb5 Iustin Pop
    if self.op.readd:
2855 a8ae3eb5 Iustin Pop
      self.new_node = self.cfg.GetNodeInfo(node)
2856 a8ae3eb5 Iustin Pop
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
2857 a8ae3eb5 Iustin Pop
    else:
2858 a8ae3eb5 Iustin Pop
      self.new_node = objects.Node(name=node,
2859 a8ae3eb5 Iustin Pop
                                   primary_ip=primary_ip,
2860 a8ae3eb5 Iustin Pop
                                   secondary_ip=secondary_ip,
2861 a8ae3eb5 Iustin Pop
                                   master_candidate=self.master_candidate,
2862 a8ae3eb5 Iustin Pop
                                   offline=False, drained=False)
2863 a8083063 Iustin Pop
2864 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2865 a8083063 Iustin Pop
    """Adds the new node to the cluster.
2866 a8083063 Iustin Pop

2867 a8083063 Iustin Pop
    """
2868 a8083063 Iustin Pop
    new_node = self.new_node
2869 a8083063 Iustin Pop
    node = new_node.name
2870 a8083063 Iustin Pop
2871 a8ae3eb5 Iustin Pop
    # for re-adds, reset the offline/drained/master-candidate flags;
2872 a8ae3eb5 Iustin Pop
    # we need to reset here, otherwise offline would prevent RPC calls
2873 a8ae3eb5 Iustin Pop
    # later in the procedure; this also means that if the re-add
2874 a8ae3eb5 Iustin Pop
    # fails, we are left with a non-offlined, broken node
2875 a8ae3eb5 Iustin Pop
    if self.op.readd:
2876 a8ae3eb5 Iustin Pop
      new_node.drained = new_node.offline = False
2877 a8ae3eb5 Iustin Pop
      self.LogInfo("Readding a node, the offline/drained flags were reset")
2878 a8ae3eb5 Iustin Pop
      # if we demote the node, we do cleanup later in the procedure
2879 a8ae3eb5 Iustin Pop
      new_node.master_candidate = self.master_candidate
2880 a8ae3eb5 Iustin Pop
2881 a8ae3eb5 Iustin Pop
    # notify the user about any possible mc promotion
2882 a8ae3eb5 Iustin Pop
    if new_node.master_candidate:
2883 a8ae3eb5 Iustin Pop
      self.LogInfo("Node will be a master candidate")
2884 a8ae3eb5 Iustin Pop
2885 a8083063 Iustin Pop
    # check connectivity
2886 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
2887 4c4e4e1e Iustin Pop
    result.Raise("Can't get version information from node %s" % node)
2888 90b54c26 Iustin Pop
    if constants.PROTOCOL_VERSION == result.payload:
2889 90b54c26 Iustin Pop
      logging.info("Communication to node %s fine, sw version %s match",
2890 90b54c26 Iustin Pop
                   node, result.payload)
2891 a8083063 Iustin Pop
    else:
2892 90b54c26 Iustin Pop
      raise errors.OpExecError("Version mismatch master version %s,"
2893 90b54c26 Iustin Pop
                               " node version %s" %
2894 90b54c26 Iustin Pop
                               (constants.PROTOCOL_VERSION, result.payload))
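    # Note: the comparison above requires an exact protocol match, not a
    # minimum version -- e.g. (illustrative numbers only) a master speaking
    # protocol 20 will refuse a node answering 19 or 21, since mixed
    # protocol versions within one cluster are not supported.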
2895 a8083063 Iustin Pop
2896 a8083063 Iustin Pop
    # setup ssh on node
2897 9a4f63d1 Iustin Pop
    logging.info("Copy ssh key to node %s", node)
2898 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2899 a8083063 Iustin Pop
    keyarray = []
2900 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2901 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2902 70d9e3d8 Iustin Pop
                priv_key, pub_key]
2903 a8083063 Iustin Pop
2904 a8083063 Iustin Pop
    for i in keyfiles:
2905 13998ef2 Michael Hanselmann
      keyarray.append(utils.ReadFile(i))
2906 a8083063 Iustin Pop
2907 72737a7f Iustin Pop
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2908 72737a7f Iustin Pop
                                    keyarray[2],
2909 72737a7f Iustin Pop
                                    keyarray[3], keyarray[4], keyarray[5])
2910 4c4e4e1e Iustin Pop
    result.Raise("Cannot transfer ssh keys to the new node")
2911 a8083063 Iustin Pop
2912 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
2913 b86a6bcd Guido Trotter
    if self.cfg.GetClusterInfo().modify_etc_hosts:
2914 b86a6bcd Guido Trotter
      utils.AddHostToEtcHosts(new_node.name)
2915 c8a0948f Michael Hanselmann
2916 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
2917 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
2918 781de953 Iustin Pop
                                                 new_node.secondary_ip)
2919 4c4e4e1e Iustin Pop
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
2920 4c4e4e1e Iustin Pop
                   prereq=True)
2921 c2fc8250 Iustin Pop
      if not result.payload:
2922 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2923 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
2924 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
2925 a8083063 Iustin Pop
2926 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
2927 5c0527ed Guido Trotter
    node_verify_param = {
2928 f60759f7 Iustin Pop
      constants.NV_NODELIST: [node],
2929 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
2930 5c0527ed Guido Trotter
    }
2931 5c0527ed Guido Trotter
2932 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2933 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
2934 5c0527ed Guido Trotter
    for verifier in node_verify_list:
2935 4c4e4e1e Iustin Pop
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
2936 f60759f7 Iustin Pop
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
2937 6f68a739 Iustin Pop
      if nl_payload:
2938 6f68a739 Iustin Pop
        for failed in nl_payload:
2939 31821208 Iustin Pop
          feedback_fn("ssh/hostname verification failed"
2940 31821208 Iustin Pop
                      " (checking from %s): %s" %
2941 6f68a739 Iustin Pop
                      (verifier, nl_payload[failed]))
2942 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
2943 ff98055b Iustin Pop
2944 d8470559 Michael Hanselmann
    if self.op.readd:
2945 28eddce5 Guido Trotter
      _RedistributeAncillaryFiles(self)
2946 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
2947 a8ae3eb5 Iustin Pop
      # make sure we redistribute the config
2948 a8ae3eb5 Iustin Pop
      self.cfg.Update(new_node)
2949 a8ae3eb5 Iustin Pop
      # and make sure the new node will not have old files around
2950 a8ae3eb5 Iustin Pop
      if not new_node.master_candidate:
2951 a8ae3eb5 Iustin Pop
        result = self.rpc.call_node_demote_from_mc(new_node.name)
2952 3cebe102 Michael Hanselmann
        msg = result.fail_msg
2953 a8ae3eb5 Iustin Pop
        if msg:
2954 a8ae3eb5 Iustin Pop
          self.LogWarning("Node failed to demote itself from master"
2955 a8ae3eb5 Iustin Pop
                          " candidate status: %s" % msg)
2956 d8470559 Michael Hanselmann
    else:
2957 035566e3 Iustin Pop
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
2958 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
2959 a8083063 Iustin Pop
2960 a8083063 Iustin Pop
2961 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
2962 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
2963 b31c8676 Iustin Pop

2964 b31c8676 Iustin Pop
  """
2965 b31c8676 Iustin Pop
  HPATH = "node-modify"
2966 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2967 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
2968 b31c8676 Iustin Pop
  REQ_BGL = False
2969 b31c8676 Iustin Pop
2970 b31c8676 Iustin Pop
  def CheckArguments(self):
2971 b31c8676 Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2972 b31c8676 Iustin Pop
    if node_name is None:
2973 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2974 b31c8676 Iustin Pop
    self.op.node_name = node_name
2975 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'master_candidate')
2976 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'offline')
2977 c9d443ea Iustin Pop
    _CheckBooleanOpField(self.op, 'drained')
2978 c9d443ea Iustin Pop
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
2979 c9d443ea Iustin Pop
    if all_mods.count(None) == 3:
2980 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification")
2981 c9d443ea Iustin Pop
    if all_mods.count(True) > 1:
2982 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Can't set the node into more than one"
2983 c9d443ea Iustin Pop
                                 " state at the same time")
2984 b31c8676 Iustin Pop
2985 b31c8676 Iustin Pop
  def ExpandNames(self):
2986 b31c8676 Iustin Pop
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
2987 b31c8676 Iustin Pop
2988 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
2989 b31c8676 Iustin Pop
    """Build hooks env.
2990 b31c8676 Iustin Pop

2991 b31c8676 Iustin Pop
    This runs on the master node.
2992 b31c8676 Iustin Pop

2993 b31c8676 Iustin Pop
    """
2994 b31c8676 Iustin Pop
    env = {
2995 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
2996 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
2997 3a5ba66a Iustin Pop
      "OFFLINE": str(self.op.offline),
2998 c9d443ea Iustin Pop
      "DRAINED": str(self.op.drained),
2999 b31c8676 Iustin Pop
      }
3000 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
3001 b31c8676 Iustin Pop
          self.op.node_name]
3002 b31c8676 Iustin Pop
    return env, nl, nl
3003 b31c8676 Iustin Pop
3004 b31c8676 Iustin Pop
  def CheckPrereq(self):
3005 b31c8676 Iustin Pop
    """Check prerequisites.
3006 b31c8676 Iustin Pop

3007 b31c8676 Iustin Pop
    This checks the requested flag changes against the node's current state.
3008 b31c8676 Iustin Pop

3009 b31c8676 Iustin Pop
    """
3010 3a5ba66a Iustin Pop
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
3011 b31c8676 Iustin Pop
3012 97c61d46 Iustin Pop
    if (self.op.master_candidate is not None or
3013 97c61d46 Iustin Pop
        self.op.drained is not None or
3014 97c61d46 Iustin Pop
        self.op.offline is not None):
3015 97c61d46 Iustin Pop
      # we can't change the master's node flags
3016 97c61d46 Iustin Pop
      if self.op.node_name == self.cfg.GetMasterNode():
3017 97c61d46 Iustin Pop
        raise errors.OpPrereqError("The master role can be changed"
3018 97c61d46 Iustin Pop
                                   " only via masterfailover")
3019 97c61d46 Iustin Pop
3020 8fbf5ac7 Guido Trotter
    # Boolean values that tell us whether we're offlining/draining the node
    # or reverting such a state
3021 8fbf5ac7 Guido Trotter
    offline_or_drain = self.op.offline == True or self.op.drained == True
3022 3d9eb52b Guido Trotter
    deoffline_or_drain = self.op.offline == False or self.op.drained == False
3023 8fbf5ac7 Guido Trotter
3024 8fbf5ac7 Guido Trotter
    if (node.master_candidate and
3025 8fbf5ac7 Guido Trotter
        (self.op.master_candidate == False or offline_or_drain)):
3026 3e83dd48 Iustin Pop
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
3027 8fbf5ac7 Guido Trotter
      mc_now, mc_should, mc_max = self.cfg.GetMasterCandidateStats()
3028 8fbf5ac7 Guido Trotter
      if mc_now <= cp_size:
3029 3e83dd48 Iustin Pop
        msg = ("Not enough master candidates (desired"
3030 8fbf5ac7 Guido Trotter
               " %d, new value will be %d)" % (cp_size, mc_now-1))
3031 8fbf5ac7 Guido Trotter
        # Only allow forcing the operation if it's an offline/drain operation,
3032 8fbf5ac7 Guido Trotter
        # and we could not possibly promote more nodes.
3033 8fbf5ac7 Guido Trotter
        # FIXME: this can still lead to issues if in any way another node which
3034 8fbf5ac7 Guido Trotter
        # could be promoted appears in the meantime.
3035 8fbf5ac7 Guido Trotter
        if self.op.force and offline_or_drain and mc_should == mc_max:
3036 3e83dd48 Iustin Pop
          self.LogWarning(msg)
3037 3e83dd48 Iustin Pop
        else:
3038 3e83dd48 Iustin Pop
          raise errors.OpPrereqError(msg)
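    # Illustrative example (hypothetical numbers): with candidate_pool_size
    # 10 but only 3 master candidates left (mc_now=3 <= cp_size=10), the
    # demotion is refused, unless the force flag is set for an offline/drain
    # operation and no other node could be promoted instead
    # (mc_should == mc_max).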
3039 3e83dd48 Iustin Pop
3040 c9d443ea Iustin Pop
    if (self.op.master_candidate == True and
3041 c9d443ea Iustin Pop
        ((node.offline and not self.op.offline == False) or
3042 c9d443ea Iustin Pop
         (node.drained and not self.op.drained == False))):
3043 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
3044 949bdabe Iustin Pop
                                 " to master_candidate" % node.name)
3045 3a5ba66a Iustin Pop
3046 3d9eb52b Guido Trotter
    # If we're being deofflined/drained, we'll MC ourself if needed
3047 3d9eb52b Guido Trotter
    if (deoffline_or_drain and not offline_or_drain and not
3048 3d9eb52b Guido Trotter
        self.op.master_candidate == True):
3049 3d9eb52b Guido Trotter
      self.op.master_candidate = _DecideSelfPromotion(self)
3050 3d9eb52b Guido Trotter
      if self.op.master_candidate:
3051 3d9eb52b Guido Trotter
        self.LogInfo("Autopromoting node to master candidate")
3052 3d9eb52b Guido Trotter
3053 b31c8676 Iustin Pop
    return
3054 b31c8676 Iustin Pop
3055 b31c8676 Iustin Pop
  def Exec(self, feedback_fn):
3056 b31c8676 Iustin Pop
    """Modifies a node.
3057 b31c8676 Iustin Pop

3058 b31c8676 Iustin Pop
    """
3059 3a5ba66a Iustin Pop
    node = self.node
3060 b31c8676 Iustin Pop
3061 b31c8676 Iustin Pop
    result = []
3062 c9d443ea Iustin Pop
    changed_mc = False
3063 b31c8676 Iustin Pop
3064 3a5ba66a Iustin Pop
    if self.op.offline is not None:
3065 3a5ba66a Iustin Pop
      node.offline = self.op.offline
3066 3a5ba66a Iustin Pop
      result.append(("offline", str(self.op.offline)))
3067 c9d443ea Iustin Pop
      if self.op.offline == True:
3068 c9d443ea Iustin Pop
        if node.master_candidate:
3069 c9d443ea Iustin Pop
          node.master_candidate = False
3070 c9d443ea Iustin Pop
          changed_mc = True
3071 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to offline"))
3072 c9d443ea Iustin Pop
        if node.drained:
3073 c9d443ea Iustin Pop
          node.drained = False
3074 c9d443ea Iustin Pop
          result.append(("drained", "clear drained status due to offline"))
3075 3a5ba66a Iustin Pop
3076 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
3077 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
3078 c9d443ea Iustin Pop
      changed_mc = True
3079 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
3080 56aa9fd5 Iustin Pop
      if self.op.master_candidate == False:
3081 56aa9fd5 Iustin Pop
        rrc = self.rpc.call_node_demote_from_mc(node.name)
3082 4c4e4e1e Iustin Pop
        msg = rrc.fail_msg
3083 0959c824 Iustin Pop
        if msg:
3084 0959c824 Iustin Pop
          self.LogWarning("Node failed to demote itself: %s" % msg)
3085 b31c8676 Iustin Pop
3086 c9d443ea Iustin Pop
    if self.op.drained is not None:
3087 c9d443ea Iustin Pop
      node.drained = self.op.drained
3088 82e12743 Iustin Pop
      result.append(("drained", str(self.op.drained)))
3089 c9d443ea Iustin Pop
      if self.op.drained == True:
3090 c9d443ea Iustin Pop
        if node.master_candidate:
3091 c9d443ea Iustin Pop
          node.master_candidate = False
3092 c9d443ea Iustin Pop
          changed_mc = True
3093 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to drain"))
3094 dec0d9da Iustin Pop
          rrc = self.rpc.call_node_demote_from_mc(node.name)
3095 3cebe102 Michael Hanselmann
          msg = rrc.fail_msg
3096 dec0d9da Iustin Pop
          if msg:
3097 dec0d9da Iustin Pop
            self.LogWarning("Node failed to demote itself: %s" % msg)
3098 c9d443ea Iustin Pop
        if node.offline:
3099 c9d443ea Iustin Pop
          node.offline = False
3100 c9d443ea Iustin Pop
          result.append(("offline", "clear offline status due to drain"))
3101 c9d443ea Iustin Pop
3102 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
3103 b31c8676 Iustin Pop
    self.cfg.Update(node)
3104 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
3105 c9d443ea Iustin Pop
    if changed_mc:
3106 3a26773f Iustin Pop
      self.context.ReaddNode(node)
3107 b31c8676 Iustin Pop
3108 b31c8676 Iustin Pop
    return result
3109 b31c8676 Iustin Pop
3110 b31c8676 Iustin Pop
3111 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
3112 f5118ade Iustin Pop
  """Powercycles a node.
3113 f5118ade Iustin Pop

3114 f5118ade Iustin Pop
  """
3115 f5118ade Iustin Pop
  _OP_REQP = ["node_name", "force"]
3116 f5118ade Iustin Pop
  REQ_BGL = False
3117 f5118ade Iustin Pop
3118 f5118ade Iustin Pop
  def CheckArguments(self):
3119 f5118ade Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
3120 f5118ade Iustin Pop
    if node_name is None:
3121 f5118ade Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
3122 f5118ade Iustin Pop
    self.op.node_name = node_name
3123 f5118ade Iustin Pop
    if node_name == self.cfg.GetMasterNode() and not self.op.force:
3124 f5118ade Iustin Pop
      raise errors.OpPrereqError("The node is the master and the force"
3125 f5118ade Iustin Pop
                                 " parameter was not set")
3126 f5118ade Iustin Pop
3127 f5118ade Iustin Pop
  def ExpandNames(self):
3128 f5118ade Iustin Pop
    """Locking for PowercycleNode.
3129 f5118ade Iustin Pop

3130 efb8da02 Michael Hanselmann
    This is a last-resort option and shouldn't block on other
3131 f5118ade Iustin Pop
    jobs. Therefore, we grab no locks.
3132 f5118ade Iustin Pop

3133 f5118ade Iustin Pop
    """
3134 f5118ade Iustin Pop
    self.needed_locks = {}
3135 f5118ade Iustin Pop
3136 f5118ade Iustin Pop
  def CheckPrereq(self):
3137 f5118ade Iustin Pop
    """Check prerequisites.
3138 f5118ade Iustin Pop

3139 f5118ade Iustin Pop
    This LU has no prereqs.
3140 f5118ade Iustin Pop

3141 f5118ade Iustin Pop
    """
3142 f5118ade Iustin Pop
    pass
3143 f5118ade Iustin Pop
3144 f5118ade Iustin Pop
  def Exec(self, feedback_fn):
3145 f5118ade Iustin Pop
    """Reboots a node.
3146 f5118ade Iustin Pop

3147 f5118ade Iustin Pop
    """
3148 f5118ade Iustin Pop
    result = self.rpc.call_node_powercycle(self.op.node_name,
3149 f5118ade Iustin Pop
                                           self.cfg.GetHypervisorType())
3150 4c4e4e1e Iustin Pop
    result.Raise("Failed to schedule the reboot")
3151 f5118ade Iustin Pop
    return result.payload
3152 f5118ade Iustin Pop
3153 f5118ade Iustin Pop
3154 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
3155 a8083063 Iustin Pop
  """Query cluster configuration.
3156 a8083063 Iustin Pop

3157 a8083063 Iustin Pop
  """
3158 a8083063 Iustin Pop
  _OP_REQP = []
3159 642339cf Guido Trotter
  REQ_BGL = False
3160 642339cf Guido Trotter
3161 642339cf Guido Trotter
  def ExpandNames(self):
3162 642339cf Guido Trotter
    self.needed_locks = {}
3163 a8083063 Iustin Pop
3164 a8083063 Iustin Pop
  def CheckPrereq(self):
3165 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
3166 a8083063 Iustin Pop

3167 a8083063 Iustin Pop
    """
3168 a8083063 Iustin Pop
    pass
3169 a8083063 Iustin Pop
3170 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3171 a8083063 Iustin Pop
    """Return cluster config.
3172 a8083063 Iustin Pop

3173 a8083063 Iustin Pop
    """
3174 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
3175 a8083063 Iustin Pop
    result = {
3176 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
3177 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
3178 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
3179 d1a7d66f Guido Trotter
      "os_api_version": max(constants.OS_API_VERSIONS),
3180 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
3181 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
3182 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
3183 469f88e1 Iustin Pop
      "master": cluster.master_node,
3184 066f465d Guido Trotter
      "default_hypervisor": cluster.enabled_hypervisors[0],
3185 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
3186 b8810fec Michael Hanselmann
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
3187 7c4d6c7b Michael Hanselmann
                        for hypervisor_name in cluster.enabled_hypervisors]),
3188 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
3189 1094acda Guido Trotter
      "nicparams": cluster.nicparams,
3190 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
3191 7a56b411 Guido Trotter
      "master_netdev": cluster.master_netdev,
3192 7a56b411 Guido Trotter
      "volume_group_name": cluster.volume_group_name,
3193 7a56b411 Guido Trotter
      "file_storage_dir": cluster.file_storage_dir,
3194 90f72445 Iustin Pop
      "ctime": cluster.ctime,
3195 90f72445 Iustin Pop
      "mtime": cluster.mtime,
3196 259578eb Iustin Pop
      "uuid": cluster.uuid,
3197 c118d1f4 Michael Hanselmann
      "tags": list(cluster.GetTags()),
3198 a8083063 Iustin Pop
      }
3199 a8083063 Iustin Pop
3200 a8083063 Iustin Pop
    return result
3201 a8083063 Iustin Pop
3202 a8083063 Iustin Pop
3203 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
3204 ae5849b5 Michael Hanselmann
  """Return configuration values.
3205 a8083063 Iustin Pop

3206 a8083063 Iustin Pop
  """
3207 a8083063 Iustin Pop
  _OP_REQP = []
3208 642339cf Guido Trotter
  REQ_BGL = False
3209 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
3210 05e50653 Michael Hanselmann
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
3211 05e50653 Michael Hanselmann
                                  "watcher_pause")
3212 642339cf Guido Trotter
3213 642339cf Guido Trotter
  def ExpandNames(self):
3214 642339cf Guido Trotter
    self.needed_locks = {}
3215 a8083063 Iustin Pop
3216 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3217 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3218 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
3219 ae5849b5 Michael Hanselmann
3220 a8083063 Iustin Pop
  def CheckPrereq(self):
3221 a8083063 Iustin Pop
    """No prerequisites.
3222 a8083063 Iustin Pop

3223 a8083063 Iustin Pop
    """
3224 a8083063 Iustin Pop
    pass
3225 a8083063 Iustin Pop
3226 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3227 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
3228 a8083063 Iustin Pop

3229 a8083063 Iustin Pop
    """
3230 ae5849b5 Michael Hanselmann
    values = []
3231 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
3232 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
3233 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
3234 ae5849b5 Michael Hanselmann
      elif field == "master_node":
3235 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
3236 3ccafd0e Iustin Pop
      elif field == "drain_flag":
3237 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
3238 05e50653 Michael Hanselmann
      elif field == "watcher_pause":
3239 05e50653 Michael Hanselmann
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
3240 ae5849b5 Michael Hanselmann
      else:
3241 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
3242 3ccafd0e Iustin Pop
      values.append(entry)
3243 ae5849b5 Michael Hanselmann
    return values
3244 a8083063 Iustin Pop
3245 a8083063 Iustin Pop
3246 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
3247 a8083063 Iustin Pop
  """Bring up an instance's disks.
3248 a8083063 Iustin Pop

3249 a8083063 Iustin Pop
  """
3250 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3251 f22a8ba3 Guido Trotter
  REQ_BGL = False
3252 f22a8ba3 Guido Trotter
3253 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3254 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3255 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3256 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3257 f22a8ba3 Guido Trotter
3258 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3259 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3260 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3261 a8083063 Iustin Pop
3262 a8083063 Iustin Pop
  def CheckPrereq(self):
3263 a8083063 Iustin Pop
    """Check prerequisites.
3264 a8083063 Iustin Pop

3265 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3266 a8083063 Iustin Pop

3267 a8083063 Iustin Pop
    """
3268 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3269 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3270 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3271 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3272 b4ec07f8 Iustin Pop
    if not hasattr(self.op, "ignore_size"):
3273 b4ec07f8 Iustin Pop
      self.op.ignore_size = False
3274 a8083063 Iustin Pop
3275 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3276 a8083063 Iustin Pop
    """Activate the disks.
3277 a8083063 Iustin Pop

3278 a8083063 Iustin Pop
    """
3279 b4ec07f8 Iustin Pop
    disks_ok, disks_info = \
3280 b4ec07f8 Iustin Pop
              _AssembleInstanceDisks(self, self.instance,
3281 b4ec07f8 Iustin Pop
                                     ignore_size=self.op.ignore_size)
3282 a8083063 Iustin Pop
    if not disks_ok:
3283 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
3284 a8083063 Iustin Pop
3285 a8083063 Iustin Pop
    return disks_info
3286 a8083063 Iustin Pop
3287 a8083063 Iustin Pop
3288 e3443b36 Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
3289 e3443b36 Iustin Pop
                           ignore_size=False):
3290 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
3291 a8083063 Iustin Pop

3292 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
3293 a8083063 Iustin Pop

3294 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3295 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3296 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3297 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
3298 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
3299 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
3300 e4376078 Iustin Pop
      won't result in an error return from the function
3301 e3443b36 Iustin Pop
  @type ignore_size: boolean
3302 e3443b36 Iustin Pop
  @param ignore_size: if true, the current known size of the disk
3303 e3443b36 Iustin Pop
      will not be used during the disk activation, useful for cases
3304 e3443b36 Iustin Pop
      when the size is wrong
3305 e4376078 Iustin Pop
  @return: False if the operation failed, otherwise a list of
3306 e4376078 Iustin Pop
      (host, instance_visible_name, node_visible_name)
3307 e4376078 Iustin Pop
      with the mapping from node devices to instance devices
3308 a8083063 Iustin Pop

3309 a8083063 Iustin Pop
  """
3310 a8083063 Iustin Pop
  device_info = []
3311 a8083063 Iustin Pop
  disks_ok = True
3312 fdbd668d Iustin Pop
  iname = instance.name
3313 fdbd668d Iustin Pop
  # With the two passes mechanism we try to reduce the window of
3314 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
3315 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
3316 fdbd668d Iustin Pop
3317 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
3318 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
3319 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
3320 fdbd668d Iustin Pop
  # SyncSource, etc.)
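  # As an illustration, for a DRBD disk the first pass below runs
  # blockdev_assemble with is_primary=False on both the primary and the
  # secondary node, giving both sides a chance to connect; only the second
  # pass promotes the device on the primary node.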
3321 fdbd668d Iustin Pop
3322 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
3323 a8083063 Iustin Pop
  for inst_disk in instance.disks:
3324 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3325 e3443b36 Iustin Pop
      if ignore_size:
3326 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3327 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3328 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3329 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
3330 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3331 53c14ef1 Iustin Pop
      if msg:
3332 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3333 53c14ef1 Iustin Pop
                           " (is_primary=False, pass=1): %s",
3334 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3335 fdbd668d Iustin Pop
        if not ignore_secondaries:
3336 a8083063 Iustin Pop
          disks_ok = False
3337 fdbd668d Iustin Pop
3338 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
3339 fdbd668d Iustin Pop
3340 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
3341 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
3342 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3343 fdbd668d Iustin Pop
      if node != instance.primary_node:
3344 fdbd668d Iustin Pop
        continue
3345 e3443b36 Iustin Pop
      if ignore_size:
3346 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3347 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3348 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3349 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
3350 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3351 53c14ef1 Iustin Pop
      if msg:
3352 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3353 53c14ef1 Iustin Pop
                           " (is_primary=True, pass=2): %s",
3354 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3355 fdbd668d Iustin Pop
        disks_ok = False
3356 1dff8e07 Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name,
3357 1dff8e07 Iustin Pop
                        result.payload))
3358 a8083063 Iustin Pop
3359 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
3360 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
3361 b352ab5b Iustin Pop
  # improving the logical/physical id handling
3362 b352ab5b Iustin Pop
  for disk in instance.disks:
3363 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
3364 b352ab5b Iustin Pop
3365 a8083063 Iustin Pop
  return disks_ok, device_info
3366 a8083063 Iustin Pop
3367 a8083063 Iustin Pop
3368 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
3369 3ecf6786 Iustin Pop
  """Start the disks of an instance.
3370 3ecf6786 Iustin Pop

3371 3ecf6786 Iustin Pop
  """
3372 7c4d6c7b Michael Hanselmann
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
3373 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
3374 fe7b0351 Michael Hanselmann
  if not disks_ok:
3375 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
3376 fe7b0351 Michael Hanselmann
    if force is not None and not force:
3377 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
3378 86d9d3bb Iustin Pop
                         " secondary node,"
3379 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
3380 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
3381 fe7b0351 Michael Hanselmann
3382 fe7b0351 Michael Hanselmann
3383 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
3384 a8083063 Iustin Pop
  """Shutdown an instance's disks.
3385 a8083063 Iustin Pop

3386 a8083063 Iustin Pop
  """
3387 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3388 f22a8ba3 Guido Trotter
  REQ_BGL = False
3389 f22a8ba3 Guido Trotter
3390 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3391 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3392 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3393 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3394 f22a8ba3 Guido Trotter
3395 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3396 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3397 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3398 a8083063 Iustin Pop
3399 a8083063 Iustin Pop
  def CheckPrereq(self):
3400 a8083063 Iustin Pop
    """Check prerequisites.
3401 a8083063 Iustin Pop

3402 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3403 a8083063 Iustin Pop

3404 a8083063 Iustin Pop
    """
3405 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3406 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3407 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3408 a8083063 Iustin Pop
3409 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3410 a8083063 Iustin Pop
    """Deactivate the disks
3411 a8083063 Iustin Pop

3412 a8083063 Iustin Pop
    """
3413 a8083063 Iustin Pop
    instance = self.instance
3414 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
3415 a8083063 Iustin Pop
3416 a8083063 Iustin Pop
3417 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
3418 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
3419 155d6c75 Guido Trotter

3420 155d6c75 Guido Trotter
  This function checks if an instance is running, before calling
3421 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
3422 155d6c75 Guido Trotter

3423 155d6c75 Guido Trotter
  """
3424 aca13712 Iustin Pop
  pnode = instance.primary_node
3425 4c4e4e1e Iustin Pop
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
3426 4c4e4e1e Iustin Pop
  ins_l.Raise("Can't contact node %s" % pnode)
3427 aca13712 Iustin Pop
3428 aca13712 Iustin Pop
  if instance.name in ins_l.payload:
3429 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
3430 155d6c75 Guido Trotter
                             " block devices.")
3431 155d6c75 Guido Trotter
3432 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
3433 a8083063 Iustin Pop
3434 a8083063 Iustin Pop
3435 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
3436 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
3437 a8083063 Iustin Pop

3438 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
3439 a8083063 Iustin Pop

3440 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
3441 a8083063 Iustin Pop
  ignored.
3442 a8083063 Iustin Pop

3443 a8083063 Iustin Pop
  """
3444 cacfd1fd Iustin Pop
  all_result = True
3445 a8083063 Iustin Pop
  for disk in instance.disks:
3446 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
3447 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
3448 781de953 Iustin Pop
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
3449 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3450 cacfd1fd Iustin Pop
      if msg:
3451 cacfd1fd Iustin Pop
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
3452 cacfd1fd Iustin Pop
                      disk.iv_name, node, msg)
3453 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
3454 cacfd1fd Iustin Pop
          all_result = False
3455 cacfd1fd Iustin Pop
  return all_result
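  # Summary of the error handling above: a shutdown failure on a secondary
  # node always makes this function return False, while a failure on the
  # primary node is tolerated only when ignore_primary is True (e.g. when
  # the primary node is already known to be unreachable).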
3456 a8083063 Iustin Pop
3457 a8083063 Iustin Pop
3458 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
3459 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
3460 d4f16fd9 Iustin Pop

3461 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
3462 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
3463 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
3464 d4f16fd9 Iustin Pop
  exception.
3465 d4f16fd9 Iustin Pop

3466 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
3467 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
3468 e69d05fd Iustin Pop
  @type node: C{str}
3469 e69d05fd Iustin Pop
  @param node: the node to check
3470 e69d05fd Iustin Pop
  @type reason: C{str}
3471 e69d05fd Iustin Pop
  @param reason: string to use in the error message
3472 e69d05fd Iustin Pop
  @type requested: C{int}
3473 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
3474 9ca87a96 Iustin Pop
  @type hypervisor_name: C{str}
3475 9ca87a96 Iustin Pop
  @param hypervisor_name: the hypervisor to ask for memory stats
3476 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
3477 e69d05fd Iustin Pop
      we cannot check the node
3478 d4f16fd9 Iustin Pop

3479 d4f16fd9 Iustin Pop
  """
3480 9ca87a96 Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
3481 4c4e4e1e Iustin Pop
  nodeinfo[node].Raise("Can't get data from node %s" % node, prereq=True)
3482 070e998b Iustin Pop
  free_mem = nodeinfo[node].payload.get('memory_free', None)
3483 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
3484 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
3485 070e998b Iustin Pop
                               " was '%s'" % (node, free_mem))
3486 d4f16fd9 Iustin Pop
  if requested > free_mem:
3487 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
3488 070e998b Iustin Pop
                               " needed %s MiB, available %s MiB" %
3489 070e998b Iustin Pop
                               (node, reason, requested, free_mem))
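  # Illustrative (hypothetical) usage: checking that node1.example.com can
  # accept a 512 MiB instance under the xen-pvm hypervisor would look like
  #   _CheckNodeFreeMemory(self, "node1.example.com",
  #                        "starting instance inst1", 512, "xen-pvm")
  # and would raise OpPrereqError if less than 512 MiB is reported free.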
3490 d4f16fd9 Iustin Pop
3491 d4f16fd9 Iustin Pop
3492 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
3493 a8083063 Iustin Pop
  """Starts an instance.
3494 a8083063 Iustin Pop

3495 a8083063 Iustin Pop
  """
3496 a8083063 Iustin Pop
  HPATH = "instance-start"
3497 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3498 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
3499 e873317a Guido Trotter
  REQ_BGL = False
3500 e873317a Guido Trotter
3501 e873317a Guido Trotter
  def ExpandNames(self):
3502 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3503 a8083063 Iustin Pop
3504 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3505 a8083063 Iustin Pop
    """Build hooks env.
3506 a8083063 Iustin Pop

3507 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3508 a8083063 Iustin Pop

3509 a8083063 Iustin Pop
    """
3510 a8083063 Iustin Pop
    env = {
3511 a8083063 Iustin Pop
      "FORCE": self.op.force,
3512 a8083063 Iustin Pop
      }
3513 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3514 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3515 a8083063 Iustin Pop
    return env, nl, nl
3516 a8083063 Iustin Pop
3517 a8083063 Iustin Pop
  def CheckPrereq(self):
3518 a8083063 Iustin Pop
    """Check prerequisites.
3519 a8083063 Iustin Pop

3520 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3521 a8083063 Iustin Pop

3522 a8083063 Iustin Pop
    """
3523 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3524 e873317a Guido Trotter
    assert self.instance is not None, \
3525 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3526 a8083063 Iustin Pop
3527 d04aaa2f Iustin Pop
    # extra beparams
3528 d04aaa2f Iustin Pop
    self.beparams = getattr(self.op, "beparams", {})
3529 d04aaa2f Iustin Pop
    if self.beparams:
3530 d04aaa2f Iustin Pop
      if not isinstance(self.beparams, dict):
3531 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
3532 d04aaa2f Iustin Pop
                                   " dict" % (type(self.beparams), ))
3533 d04aaa2f Iustin Pop
      # fill the beparams dict
3534 d04aaa2f Iustin Pop
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
3535 d04aaa2f Iustin Pop
      self.op.beparams = self.beparams
3536 d04aaa2f Iustin Pop
3537 d04aaa2f Iustin Pop
    # extra hvparams
3538 d04aaa2f Iustin Pop
    self.hvparams = getattr(self.op, "hvparams", {})
3539 d04aaa2f Iustin Pop
    if self.hvparams:
3540 d04aaa2f Iustin Pop
      if not isinstance(self.hvparams, dict):
3541 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
3542 d04aaa2f Iustin Pop
                                   " dict" % (type(self.hvparams), ))
3543 d04aaa2f Iustin Pop
3544 d04aaa2f Iustin Pop
      # check hypervisor parameter syntax (locally)
3545 d04aaa2f Iustin Pop
      cluster = self.cfg.GetClusterInfo()
3546 d04aaa2f Iustin Pop
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
3547 abe609b2 Guido Trotter
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
3548 d04aaa2f Iustin Pop
                                    instance.hvparams)
3549 d04aaa2f Iustin Pop
      filled_hvp.update(self.hvparams)
3550 d04aaa2f Iustin Pop
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
3551 d04aaa2f Iustin Pop
      hv_type.CheckParameterSyntax(filled_hvp)
3552 d04aaa2f Iustin Pop
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
3553 d04aaa2f Iustin Pop
      self.op.hvparams = self.hvparams
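      # The net effect is a three-level merge, valid for this start only:
      # cluster defaults for the hypervisor, then the instance's own
      # hvparams, then the one-off values passed with this opcode, checked
      # both syntactically and on the instance's nodes.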
3554 d04aaa2f Iustin Pop
3555 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3556 7527a8a4 Iustin Pop
3557 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
3558 5bbd3f7f Michael Hanselmann
    # check bridges existence
3559 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
3560 a8083063 Iustin Pop
3561 f1926756 Guido Trotter
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3562 f1926756 Guido Trotter
                                              instance.name,
3563 f1926756 Guido Trotter
                                              instance.hypervisor)
3564 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3565 4c4e4e1e Iustin Pop
                      prereq=True)
3566 7ad1af4a Iustin Pop
    if not remote_info.payload: # not running already
3567 f1926756 Guido Trotter
      _CheckNodeFreeMemory(self, instance.primary_node,
3568 f1926756 Guido Trotter
                           "starting instance %s" % instance.name,
3569 f1926756 Guido Trotter
                           bep[constants.BE_MEMORY], instance.hypervisor)
3570 d4f16fd9 Iustin Pop
3571 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3572 a8083063 Iustin Pop
    """Start the instance.
3573 a8083063 Iustin Pop

3574 a8083063 Iustin Pop
    """
3575 a8083063 Iustin Pop
    instance = self.instance
3576 a8083063 Iustin Pop
    force = self.op.force
3577 a8083063 Iustin Pop
3578 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
3579 fe482621 Iustin Pop
3580 a8083063 Iustin Pop
    node_current = instance.primary_node
3581 a8083063 Iustin Pop
3582 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
3583 a8083063 Iustin Pop
3584 d04aaa2f Iustin Pop
    result = self.rpc.call_instance_start(node_current, instance,
3585 d04aaa2f Iustin Pop
                                          self.hvparams, self.beparams)
3586 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3587 dd279568 Iustin Pop
    if msg:
3588 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3589 dd279568 Iustin Pop
      raise errors.OpExecError("Could not start instance: %s" % msg)
3590 a8083063 Iustin Pop
3591 a8083063 Iustin Pop
3592 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
3593 bf6929a2 Alexander Schreiber
  """Reboot an instance.
3594 bf6929a2 Alexander Schreiber

3595 bf6929a2 Alexander Schreiber
  """
3596 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
3597 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
3598 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
3599 e873317a Guido Trotter
  REQ_BGL = False
3600 e873317a Guido Trotter
3601 17c3f802 Guido Trotter
  def CheckArguments(self):
3602 17c3f802 Guido Trotter
    """Check the arguments.
3603 17c3f802 Guido Trotter

3604 17c3f802 Guido Trotter
    """
3605 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
3606 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
3607 17c3f802 Guido Trotter
3608 e873317a Guido Trotter
  def ExpandNames(self):
3609 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
3610 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
3611 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
3612 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
3613 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
3614 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
3615 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
3616 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3617 bf6929a2 Alexander Schreiber
3618 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
3619 bf6929a2 Alexander Schreiber
    """Build hooks env.
3620 bf6929a2 Alexander Schreiber

3621 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
3622 bf6929a2 Alexander Schreiber

3623 bf6929a2 Alexander Schreiber
    """
3624 bf6929a2 Alexander Schreiber
    env = {
3625 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
3626 2c2690c9 Iustin Pop
      "REBOOT_TYPE": self.op.reboot_type,
3627 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
3628 bf6929a2 Alexander Schreiber
      }
3629 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3630 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3631 bf6929a2 Alexander Schreiber
    return env, nl, nl
3632 bf6929a2 Alexander Schreiber
3633 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
3634 bf6929a2 Alexander Schreiber
    """Check prerequisites.
3635 bf6929a2 Alexander Schreiber

3636 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
3637 bf6929a2 Alexander Schreiber

3638 bf6929a2 Alexander Schreiber
    """
3639 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3640 e873317a Guido Trotter
    assert self.instance is not None, \
3641 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3642 bf6929a2 Alexander Schreiber
3643 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3644 7527a8a4 Iustin Pop
3645 5bbd3f7f Michael Hanselmann
    # check bridges existence
3646 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
3647 bf6929a2 Alexander Schreiber
3648 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
3649 bf6929a2 Alexander Schreiber
    """Reboot the instance.
3650 bf6929a2 Alexander Schreiber

3651 bf6929a2 Alexander Schreiber
    """
3652 bf6929a2 Alexander Schreiber
    instance = self.instance
3653 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
3654 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
3655 bf6929a2 Alexander Schreiber
3656 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
3657 bf6929a2 Alexander Schreiber
3658 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
3659 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
3660 ae48ac32 Iustin Pop
      for disk in instance.disks:
3661 ae48ac32 Iustin Pop
        self.cfg.SetDiskID(disk, node_current)
3662 781de953 Iustin Pop
      result = self.rpc.call_instance_reboot(node_current, instance,
3663 17c3f802 Guido Trotter
                                             reboot_type,
3664 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
3665 4c4e4e1e Iustin Pop
      result.Raise("Could not reboot instance")
3666 bf6929a2 Alexander Schreiber
    else:
3667 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(node_current, instance,
3668 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
3669 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance for full reboot")
3670 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3671 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
3672 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(node_current, instance, None, None)
3673 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3674 dd279568 Iustin Pop
      if msg:
3675 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3676 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance for"
3677 dd279568 Iustin Pop
                                 " full reboot: %s" % msg)
3678 bf6929a2 Alexander Schreiber
3679 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
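    # To summarize the dispatch above: soft and hard reboots are handled by
    # a single instance_reboot RPC on the primary node, while a "full"
    # reboot is emulated by shutting the instance down, cycling its disks
    # and starting it again.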
3680 bf6929a2 Alexander Schreiber
3681 bf6929a2 Alexander Schreiber
3682 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
3683 a8083063 Iustin Pop
  """Shutdown an instance.
3684 a8083063 Iustin Pop

3685 a8083063 Iustin Pop
  """
3686 a8083063 Iustin Pop
  HPATH = "instance-stop"
3687 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3688 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3689 e873317a Guido Trotter
  REQ_BGL = False
3690 e873317a Guido Trotter
3691 6263189c Guido Trotter
  def CheckArguments(self):
3692 6263189c Guido Trotter
    """Check the arguments.
3693 6263189c Guido Trotter

3694 6263189c Guido Trotter
    """
3695 6263189c Guido Trotter
    self.timeout = getattr(self.op, "timeout",
3696 6263189c Guido Trotter
                           constants.DEFAULT_SHUTDOWN_TIMEOUT)
3697 6263189c Guido Trotter
3698 e873317a Guido Trotter
  def ExpandNames(self):
3699 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3700 a8083063 Iustin Pop
3701 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3702 a8083063 Iustin Pop
    """Build hooks env.
3703 a8083063 Iustin Pop

3704 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3705 a8083063 Iustin Pop

3706 a8083063 Iustin Pop
    """
3707 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3708 6263189c Guido Trotter
    env["TIMEOUT"] = self.timeout
3709 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3710 a8083063 Iustin Pop
    return env, nl, nl
3711 a8083063 Iustin Pop
3712 a8083063 Iustin Pop
  def CheckPrereq(self):
3713 a8083063 Iustin Pop
    """Check prerequisites.
3714 a8083063 Iustin Pop

3715 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3716 a8083063 Iustin Pop

3717 a8083063 Iustin Pop
    """
3718 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3719 e873317a Guido Trotter
    assert self.instance is not None, \
3720 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3721 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3722 a8083063 Iustin Pop
3723 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3724 a8083063 Iustin Pop
    """Shutdown the instance.
3725 a8083063 Iustin Pop

3726 a8083063 Iustin Pop
    """
3727 a8083063 Iustin Pop
    instance = self.instance
3728 a8083063 Iustin Pop
    node_current = instance.primary_node
3729 6263189c Guido Trotter
    timeout = self.timeout
3730 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
3731 6263189c Guido Trotter
    result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
3732 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3733 1fae010f Iustin Pop
    if msg:
3734 1fae010f Iustin Pop
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
3735 a8083063 Iustin Pop
3736 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
3737 a8083063 Iustin Pop
3738 a8083063 Iustin Pop
3739 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
3740 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
3741 fe7b0351 Michael Hanselmann

3742 fe7b0351 Michael Hanselmann
  """
3743 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
3744 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
3745 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
3746 4e0b4d2d Guido Trotter
  REQ_BGL = False
3747 4e0b4d2d Guido Trotter
3748 4e0b4d2d Guido Trotter
  def ExpandNames(self):
3749 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
3750 fe7b0351 Michael Hanselmann
3751 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
3752 fe7b0351 Michael Hanselmann
    """Build hooks env.
3753 fe7b0351 Michael Hanselmann

3754 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
3755 fe7b0351 Michael Hanselmann

3756 fe7b0351 Michael Hanselmann
    """
3757 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3758 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3759 fe7b0351 Michael Hanselmann
    return env, nl, nl
3760 fe7b0351 Michael Hanselmann
3761 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
3762 fe7b0351 Michael Hanselmann
    """Check prerequisites.
3763 fe7b0351 Michael Hanselmann

3764 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
3765 fe7b0351 Michael Hanselmann

3766 fe7b0351 Michael Hanselmann
    """
3767 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3768 4e0b4d2d Guido Trotter
    assert instance is not None, \
3769 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3770 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3771 4e0b4d2d Guido Trotter
3772 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
3773 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
3774 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3775 0d68c45d Iustin Pop
    if instance.admin_up:
3776 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3777 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3778 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3779 72737a7f Iustin Pop
                                              instance.name,
3780 72737a7f Iustin Pop
                                              instance.hypervisor)
3781 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3782 4c4e4e1e Iustin Pop
                      prereq=True)
3783 7ad1af4a Iustin Pop
    if remote_info.payload:
3784 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3785 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
3786 3ecf6786 Iustin Pop
                                  instance.primary_node))
3787 d0834de3 Michael Hanselmann
3788 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
3789 f2c05717 Guido Trotter
    self.op.force_variant = getattr(self.op, "force_variant", False)
3790 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3791 d0834de3 Michael Hanselmann
      # OS verification
3792 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
3793 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
3794 d0834de3 Michael Hanselmann
      if pnode is None:
3795 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
3796 3ecf6786 Iustin Pop
                                   instance.primary_node)
3797 781de953 Iustin Pop
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
3798 4c4e4e1e Iustin Pop
      result.Raise("OS '%s' not in supported OS list for primary node %s" %
3799 4c4e4e1e Iustin Pop
                   (self.op.os_type, pnode.name), prereq=True)
3800 f2c05717 Guido Trotter
      if not self.op.force_variant:
3801 f2c05717 Guido Trotter
        _CheckOSVariant(result.payload, self.op.os_type)
3802 d0834de3 Michael Hanselmann
3803 fe7b0351 Michael Hanselmann
    self.instance = instance
3804 fe7b0351 Michael Hanselmann
3805 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
3806 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
3807 fe7b0351 Michael Hanselmann

3808 fe7b0351 Michael Hanselmann
    """
3809 fe7b0351 Michael Hanselmann
    inst = self.instance
3810 fe7b0351 Michael Hanselmann
3811 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3812 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
3813 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
3814 97abc79f Iustin Pop
      self.cfg.Update(inst)
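    # the instance's disks must be assembled on the primary node so that the
    # OS create scripts can run against them; they are shut down again in the
    # finally clause below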
3815 d0834de3 Michael Hanselmann
3816 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
3817 fe7b0351 Michael Hanselmann
    try:
3818 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
3819 e557bae9 Guido Trotter
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
3820 4c4e4e1e Iustin Pop
      result.Raise("Could not install OS for instance %s on node %s" %
3821 4c4e4e1e Iustin Pop
                   (inst.name, inst.primary_node))
3822 fe7b0351 Michael Hanselmann
    finally:
3823 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
3824 fe7b0351 Michael Hanselmann
3825 fe7b0351 Michael Hanselmann
3826 bd315bfa Iustin Pop
class LURecreateInstanceDisks(LogicalUnit):
3827 bd315bfa Iustin Pop
  """Recreate an instance's missing disks.
3828 bd315bfa Iustin Pop

3829 bd315bfa Iustin Pop
  """
3830 bd315bfa Iustin Pop
  HPATH = "instance-recreate-disks"
3831 bd315bfa Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3832 bd315bfa Iustin Pop
  _OP_REQP = ["instance_name", "disks"]
3833 bd315bfa Iustin Pop
  REQ_BGL = False
3834 bd315bfa Iustin Pop
3835 bd315bfa Iustin Pop
  def CheckArguments(self):
3836 bd315bfa Iustin Pop
    """Check the arguments.
3837 bd315bfa Iustin Pop

3838 bd315bfa Iustin Pop
    """
3839 bd315bfa Iustin Pop
    if not isinstance(self.op.disks, list):
3840 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Invalid disks parameter")
3841 bd315bfa Iustin Pop
    for item in self.op.disks:
3842 bd315bfa Iustin Pop
      if (not isinstance(item, int) or
3843 bd315bfa Iustin Pop
          item < 0):
3844 bd315bfa Iustin Pop
        raise errors.OpPrereqError("Invalid disk specification '%s'" %
3845 bd315bfa Iustin Pop
                                   str(item))
3846 bd315bfa Iustin Pop
3847 bd315bfa Iustin Pop
  def ExpandNames(self):
3848 bd315bfa Iustin Pop
    self._ExpandAndLockInstance()
3849 bd315bfa Iustin Pop
3850 bd315bfa Iustin Pop
  def BuildHooksEnv(self):
3851 bd315bfa Iustin Pop
    """Build hooks env.
3852 bd315bfa Iustin Pop

3853 bd315bfa Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3854 bd315bfa Iustin Pop

3855 bd315bfa Iustin Pop
    """
3856 bd315bfa Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3857 bd315bfa Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3858 bd315bfa Iustin Pop
    return env, nl, nl
3859 bd315bfa Iustin Pop
3860 bd315bfa Iustin Pop
  def CheckPrereq(self):
3861 bd315bfa Iustin Pop
    """Check prerequisites.
3862 bd315bfa Iustin Pop

3863 bd315bfa Iustin Pop
    This checks that the instance is in the cluster and is not running.
3864 bd315bfa Iustin Pop

3865 bd315bfa Iustin Pop
    """
3866 bd315bfa Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3867 bd315bfa Iustin Pop
    assert instance is not None, \
3868 bd315bfa Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
3869 bd315bfa Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3870 bd315bfa Iustin Pop
3871 bd315bfa Iustin Pop
    if instance.disk_template == constants.DT_DISKLESS:
3872 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
3873 bd315bfa Iustin Pop
                                 self.op.instance_name)
3874 bd315bfa Iustin Pop
    if instance.admin_up:
3875 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3876 bd315bfa Iustin Pop
                                 self.op.instance_name)
3877 bd315bfa Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3878 bd315bfa Iustin Pop
                                              instance.name,
3879 bd315bfa Iustin Pop
                                              instance.hypervisor)
3880 bd315bfa Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3881 bd315bfa Iustin Pop
                      prereq=True)
3882 bd315bfa Iustin Pop
    if remote_info.payload:
3883 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3884 bd315bfa Iustin Pop
                                 (self.op.instance_name,
3885 bd315bfa Iustin Pop
                                  instance.primary_node))
3886 bd315bfa Iustin Pop
3887 bd315bfa Iustin Pop
    if not self.op.disks:
3888 bd315bfa Iustin Pop
      self.op.disks = range(len(instance.disks))
3889 bd315bfa Iustin Pop
    else:
3890 bd315bfa Iustin Pop
      for idx in self.op.disks:
3891 bd315bfa Iustin Pop
        if idx >= len(instance.disks):
3892 bd315bfa Iustin Pop
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx)
3893 bd315bfa Iustin Pop
3894 bd315bfa Iustin Pop
    self.instance = instance
3895 bd315bfa Iustin Pop
3896 bd315bfa Iustin Pop
  def Exec(self, feedback_fn):
3897 bd315bfa Iustin Pop
    """Recreate the disks.
3898 bd315bfa Iustin Pop

3899 bd315bfa Iustin Pop
    """
3900 bd315bfa Iustin Pop
    to_skip = []
3901 bd315bfa Iustin Pop
    for idx, disk in enumerate(self.instance.disks):
3902 bd315bfa Iustin Pop
      if idx not in self.op.disks: # disk idx has not been passed in
3903 bd315bfa Iustin Pop
        to_skip.append(idx)
3904 bd315bfa Iustin Pop
        continue
3905 bd315bfa Iustin Pop
3906 bd315bfa Iustin Pop
    _CreateDisks(self, self.instance, to_skip=to_skip)
3907 bd315bfa Iustin Pop
3908 bd315bfa Iustin Pop
3909 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
3910 decd5f45 Iustin Pop
  """Rename an instance.
3911 decd5f45 Iustin Pop

3912 decd5f45 Iustin Pop
  """
3913 decd5f45 Iustin Pop
  HPATH = "instance-rename"
3914 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3915 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
3916 decd5f45 Iustin Pop
3917 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
3918 decd5f45 Iustin Pop
    """Build hooks env.
3919 decd5f45 Iustin Pop

3920 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3921 decd5f45 Iustin Pop

3922 decd5f45 Iustin Pop
    """
3923 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3924 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
3925 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3926 decd5f45 Iustin Pop
    return env, nl, nl
3927 decd5f45 Iustin Pop
3928 decd5f45 Iustin Pop
  def CheckPrereq(self):
3929 decd5f45 Iustin Pop
    """Check prerequisites.
3930 decd5f45 Iustin Pop

3931 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
3932 decd5f45 Iustin Pop

3933 decd5f45 Iustin Pop
    """
3934 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3935 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3936 decd5f45 Iustin Pop
    if instance is None:
3937 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3938 decd5f45 Iustin Pop
                                 self.op.instance_name)
3939 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3940 7527a8a4 Iustin Pop
3941 0d68c45d Iustin Pop
    if instance.admin_up:
3942 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3943 decd5f45 Iustin Pop
                                 self.op.instance_name)
3944 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3945 72737a7f Iustin Pop
                                              instance.name,
3946 72737a7f Iustin Pop
                                              instance.hypervisor)
3947 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3948 4c4e4e1e Iustin Pop
                      prereq=True)
3949 7ad1af4a Iustin Pop
    if remote_info.payload:
3950 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3951 decd5f45 Iustin Pop
                                 (self.op.instance_name,
3952 decd5f45 Iustin Pop
                                  instance.primary_node))
3953 decd5f45 Iustin Pop
    self.instance = instance
3954 decd5f45 Iustin Pop
3955 decd5f45 Iustin Pop
    # new name verification
3956 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
3957 decd5f45 Iustin Pop
3958 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
3959 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
3960 7bde3275 Guido Trotter
    if new_name in instance_list:
3961 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3962 c09f363f Manuel Franceschini
                                 new_name)
3963 7bde3275 Guido Trotter
3964 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
3965 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
3966 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3967 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
3968 decd5f45 Iustin Pop
3969 decd5f45 Iustin Pop
3970 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
3971 decd5f45 Iustin Pop
    """Reinstall the instance.
3972 decd5f45 Iustin Pop

3973 decd5f45 Iustin Pop
    """
3974 decd5f45 Iustin Pop
    inst = self.instance
3975 decd5f45 Iustin Pop
    old_name = inst.name
3976 decd5f45 Iustin Pop
3977 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
3978 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3979 b23c4333 Manuel Franceschini
3980 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
3981 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
3982 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
3983 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
3984 decd5f45 Iustin Pop
3985 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
3986 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
3987 decd5f45 Iustin Pop
3988 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
3989 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3990 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
3991 72737a7f Iustin Pop
                                                     old_file_storage_dir,
3992 72737a7f Iustin Pop
                                                     new_file_storage_dir)
3993 4c4e4e1e Iustin Pop
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
3994 4c4e4e1e Iustin Pop
                   " (but the instance has been renamed in Ganeti)" %
3995 4c4e4e1e Iustin Pop
                   (inst.primary_node, old_file_storage_dir,
3996 4c4e4e1e Iustin Pop
                    new_file_storage_dir))
3997 b23c4333 Manuel Franceschini
3998 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
3999 decd5f45 Iustin Pop
    try:
4000 781de953 Iustin Pop
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
4001 781de953 Iustin Pop
                                                 old_name)
4002 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4003 96841384 Iustin Pop
      if msg:
4004 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
4005 96841384 Iustin Pop
               " (but the instance has been renamed in Ganeti): %s" %
4006 96841384 Iustin Pop
               (inst.name, inst.primary_node, msg))
4007 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
4008 decd5f45 Iustin Pop
    finally:
4009 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
4010 decd5f45 Iustin Pop
4011 decd5f45 Iustin Pop
4012 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
4013 a8083063 Iustin Pop
  """Remove an instance.
4014 a8083063 Iustin Pop

4015 a8083063 Iustin Pop
  """
4016 a8083063 Iustin Pop
  HPATH = "instance-remove"
4017 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4018 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
4019 cf472233 Guido Trotter
  REQ_BGL = False
4020 cf472233 Guido Trotter
4021 17c3f802 Guido Trotter
  def CheckArguments(self):
4022 17c3f802 Guido Trotter
    """Check the arguments.
4023 17c3f802 Guido Trotter

4024 17c3f802 Guido Trotter
    """
4025 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4026 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4027 17c3f802 Guido Trotter
4028 cf472233 Guido Trotter
  def ExpandNames(self):
4029 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
4030 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4031 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4032 cf472233 Guido Trotter
4033 cf472233 Guido Trotter
  def DeclareLocks(self, level):
4034 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
4035 cf472233 Guido Trotter
      self._LockInstancesNodes()
4036 a8083063 Iustin Pop
4037 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4038 a8083063 Iustin Pop
    """Build hooks env.
4039 a8083063 Iustin Pop

4040 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4041 a8083063 Iustin Pop

4042 a8083063 Iustin Pop
    """
4043 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4044 17c3f802 Guido Trotter
    env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
4045 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
4046 a8083063 Iustin Pop
    return env, nl, nl
4047 a8083063 Iustin Pop
4048 a8083063 Iustin Pop
  def CheckPrereq(self):
4049 a8083063 Iustin Pop
    """Check prerequisites.
4050 a8083063 Iustin Pop

4051 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4052 a8083063 Iustin Pop

4053 a8083063 Iustin Pop
    """
4054 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4055 cf472233 Guido Trotter
    assert self.instance is not None, \
4056 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4057 a8083063 Iustin Pop
4058 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4059 a8083063 Iustin Pop
    """Remove the instance.
4060 a8083063 Iustin Pop

4061 a8083063 Iustin Pop
    """
4062 a8083063 Iustin Pop
    instance = self.instance
4063 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
4064 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
4065 a8083063 Iustin Pop
4066 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
4067 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4068 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4069 1fae010f Iustin Pop
    if msg:
4070 1d67656e Iustin Pop
      if self.op.ignore_failures:
4071 1fae010f Iustin Pop
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
4072 1d67656e Iustin Pop
      else:
4073 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4074 1fae010f Iustin Pop
                                 " node %s: %s" %
4075 1fae010f Iustin Pop
                                 (instance.name, instance.primary_node, msg))
4076 a8083063 Iustin Pop
4077 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
4078 a8083063 Iustin Pop
4079 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
4080 1d67656e Iustin Pop
      if self.op.ignore_failures:
4081 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
4082 1d67656e Iustin Pop
      else:
4083 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
4084 a8083063 Iustin Pop
4085 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
4086 a8083063 Iustin Pop
4087 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
4088 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
4089 a8083063 Iustin Pop
4090 a8083063 Iustin Pop
4091 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
4092 a8083063 Iustin Pop
  """Logical unit for querying instances.
4093 a8083063 Iustin Pop

4094 a8083063 Iustin Pop
  """
4095 ec79568d Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
4096 7eb9d8f7 Guido Trotter
  REQ_BGL = False
4097 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
4098 19bed813 Iustin Pop
                    "serial_no", "ctime", "mtime", "uuid"]
4099 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
4100 5b460366 Iustin Pop
                                    "admin_state",
4101 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
4102 638c6349 Guido Trotter
                                    "nic_mode", "nic_link",
4103 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
4104 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
4105 8aec325c Iustin Pop
                                    r"(disk)\.(size)/([0-9]+)",
4106 8aec325c Iustin Pop
                                    r"(disk)\.(sizes)", "disk_usage",
4107 638c6349 Guido Trotter
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
4108 638c6349 Guido Trotter
                                    r"(nic)\.(bridge)/([0-9]+)",
4109 638c6349 Guido Trotter
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
4110 8aec325c Iustin Pop
                                    r"(disk|nic)\.(count)",
4111 19bed813 Iustin Pop
                                    "hvparams",
4112 19bed813 Iustin Pop
                                    ] + _SIMPLE_FIELDS +
4113 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
4114 a2d2e1a7 Iustin Pop
                                   for name in constants.HVS_PARAMETERS] +
4115 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
4116 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
4117 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
4118 31bf511f Iustin Pop
4119 a8083063 Iustin Pop
4120 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
4121 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
4122 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
4123 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
4124 a8083063 Iustin Pop
4125 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
4126 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
4127 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
4128 7eb9d8f7 Guido Trotter
4129 57a2fb91 Iustin Pop
    if self.op.names:
4130 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
4131 7eb9d8f7 Guido Trotter
    else:
4132 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
4133 7eb9d8f7 Guido Trotter
4134 ec79568d Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
4135 ec79568d Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
4136 57a2fb91 Iustin Pop
    if self.do_locking:
4137 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
4138 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
4139 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4140 7eb9d8f7 Guido Trotter
4141 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
4142 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
4143 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
4144 7eb9d8f7 Guido Trotter
4145 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
4146 7eb9d8f7 Guido Trotter
    """Check prerequisites.
4147 7eb9d8f7 Guido Trotter

4148 7eb9d8f7 Guido Trotter
    """
4149 57a2fb91 Iustin Pop
    pass
4150 069dcc86 Iustin Pop
4151 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4152 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
4153 a8083063 Iustin Pop

4154 a8083063 Iustin Pop
    """
4155 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
4156 a7f5dc98 Iustin Pop
    if self.wanted == locking.ALL_SET:
4157 a7f5dc98 Iustin Pop
      # caller didn't specify instance names, so ordering is not important
4158 a7f5dc98 Iustin Pop
      if self.do_locking:
4159 a7f5dc98 Iustin Pop
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4160 a7f5dc98 Iustin Pop
      else:
4161 a7f5dc98 Iustin Pop
        instance_names = all_info.keys()
4162 a7f5dc98 Iustin Pop
      instance_names = utils.NiceSort(instance_names)
4163 57a2fb91 Iustin Pop
    else:
4164 a7f5dc98 Iustin Pop
      # caller did specify names, so we must keep the ordering
4165 a7f5dc98 Iustin Pop
      if self.do_locking:
4166 a7f5dc98 Iustin Pop
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
4167 a7f5dc98 Iustin Pop
      else:
4168 a7f5dc98 Iustin Pop
        tgt_set = all_info.keys()
4169 a7f5dc98 Iustin Pop
      missing = set(self.wanted).difference(tgt_set)
4170 a7f5dc98 Iustin Pop
      if missing:
4171 a7f5dc98 Iustin Pop
        raise errors.OpExecError("Some instances were removed before"
4172 a7f5dc98 Iustin Pop
                                 " retrieving their data: %s" % missing)
4173 a7f5dc98 Iustin Pop
      instance_names = self.wanted
4174 c1f1cbb2 Iustin Pop
4175 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
4176 a8083063 Iustin Pop
4177 a8083063 Iustin Pop
    # begin data gathering
4178 a8083063 Iustin Pop
4179 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
4180 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
4181 a8083063 Iustin Pop
4182 a8083063 Iustin Pop
    bad_nodes = []
4183 cbfc4681 Iustin Pop
    off_nodes = []
4184 ec79568d Iustin Pop
    if self.do_node_query:
4185 a8083063 Iustin Pop
      live_data = {}
4186 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
4187 a8083063 Iustin Pop
      for name in nodes:
4188 a8083063 Iustin Pop
        result = node_data[name]
4189 cbfc4681 Iustin Pop
        if result.offline:
4190 cbfc4681 Iustin Pop
          # offline nodes will be in both lists
4191 cbfc4681 Iustin Pop
          off_nodes.append(name)
4192 3cebe102 Michael Hanselmann
        if result.fail_msg:
4193 a8083063 Iustin Pop
          bad_nodes.append(name)
4194 781de953 Iustin Pop
        else:
4195 2fa74ef4 Iustin Pop
          if result.payload:
4196 2fa74ef4 Iustin Pop
            live_data.update(result.payload)
4197 2fa74ef4 Iustin Pop
          # else no instance is alive
4198 a8083063 Iustin Pop
    else:
4199 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
4200 a8083063 Iustin Pop
4201 a8083063 Iustin Pop
    # end data gathering
4202 a8083063 Iustin Pop
4203 5018a335 Iustin Pop
    HVPREFIX = "hv/"
4204 338e51e8 Iustin Pop
    BEPREFIX = "be/"
4205 a8083063 Iustin Pop
    output = []
4206 638c6349 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
4207 a8083063 Iustin Pop
    for instance in instance_list:
4208 a8083063 Iustin Pop
      iout = []
4209 638c6349 Guido Trotter
      i_hv = cluster.FillHV(instance)
4210 638c6349 Guido Trotter
      i_be = cluster.FillBE(instance)
4211 638c6349 Guido Trotter
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4212 638c6349 Guido Trotter
                                 nic.nicparams) for nic in instance.nics]
4213 a8083063 Iustin Pop
      for field in self.op.output_fields:
4214 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
4215 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
4216 19bed813 Iustin Pop
          val = getattr(instance, field)
4217 a8083063 Iustin Pop
        elif field == "pnode":
4218 a8083063 Iustin Pop
          val = instance.primary_node
4219 a8083063 Iustin Pop
        elif field == "snodes":
4220 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
4221 a8083063 Iustin Pop
        elif field == "admin_state":
4222 0d68c45d Iustin Pop
          val = instance.admin_up
4223 a8083063 Iustin Pop
        elif field == "oper_state":
4224 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4225 8a23d2d3 Iustin Pop
            val = None
4226 a8083063 Iustin Pop
          else:
4227 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
4228 d8052456 Iustin Pop
        elif field == "status":
4229 cbfc4681 Iustin Pop
          if instance.primary_node in off_nodes:
4230 cbfc4681 Iustin Pop
            val = "ERROR_nodeoffline"
4231 cbfc4681 Iustin Pop
          elif instance.primary_node in bad_nodes:
4232 d8052456 Iustin Pop
            val = "ERROR_nodedown"
4233 d8052456 Iustin Pop
          else:
4234 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
4235 d8052456 Iustin Pop
            if running:
4236 0d68c45d Iustin Pop
              if instance.admin_up:
4237 d8052456 Iustin Pop
                val = "running"
4238 d8052456 Iustin Pop
              else:
4239 d8052456 Iustin Pop
                val = "ERROR_up"
4240 d8052456 Iustin Pop
            else:
4241 0d68c45d Iustin Pop
              if instance.admin_up:
4242 d8052456 Iustin Pop
                val = "ERROR_down"
4243 d8052456 Iustin Pop
              else:
4244 d8052456 Iustin Pop
                val = "ADMIN_down"
4245 a8083063 Iustin Pop
        elif field == "oper_ram":
4246 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4247 8a23d2d3 Iustin Pop
            val = None
4248 a8083063 Iustin Pop
          elif instance.name in live_data:
4249 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
4250 a8083063 Iustin Pop
          else:
4251 a8083063 Iustin Pop
            val = "-"
4252 c1ce76bb Iustin Pop
        elif field == "vcpus":
4253 c1ce76bb Iustin Pop
          val = i_be[constants.BE_VCPUS]
4254 a8083063 Iustin Pop
        elif field == "disk_template":
4255 a8083063 Iustin Pop
          val = instance.disk_template
4256 a8083063 Iustin Pop
        elif field == "ip":
4257 39a02558 Guido Trotter
          if instance.nics:
4258 39a02558 Guido Trotter
            val = instance.nics[0].ip
4259 39a02558 Guido Trotter
          else:
4260 39a02558 Guido Trotter
            val = None
4261 638c6349 Guido Trotter
        elif field == "nic_mode":
4262 638c6349 Guido Trotter
          if instance.nics:
4263 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_MODE]
4264 638c6349 Guido Trotter
          else:
4265 638c6349 Guido Trotter
            val = None
4266 638c6349 Guido Trotter
        elif field == "nic_link":
4267 39a02558 Guido Trotter
          if instance.nics:
4268 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4269 638c6349 Guido Trotter
          else:
4270 638c6349 Guido Trotter
            val = None
4271 638c6349 Guido Trotter
        elif field == "bridge":
4272 638c6349 Guido Trotter
          if (instance.nics and
4273 638c6349 Guido Trotter
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
4274 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4275 39a02558 Guido Trotter
          else:
4276 39a02558 Guido Trotter
            val = None
4277 a8083063 Iustin Pop
        elif field == "mac":
4278 39a02558 Guido Trotter
          if instance.nics:
4279 39a02558 Guido Trotter
            val = instance.nics[0].mac
4280 39a02558 Guido Trotter
          else:
4281 39a02558 Guido Trotter
            val = None
4282 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
4283 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
4284 ad24e046 Iustin Pop
          try:
4285 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
4286 ad24e046 Iustin Pop
          except errors.OpPrereqError:
4287 8a23d2d3 Iustin Pop
            val = None
4288 024e157f Iustin Pop
        elif field == "disk_usage": # total disk usage per node
4289 024e157f Iustin Pop
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
4290 024e157f Iustin Pop
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
4291 130a6a6f Iustin Pop
        elif field == "tags":
4292 130a6a6f Iustin Pop
          val = list(instance.GetTags())
4293 338e51e8 Iustin Pop
        elif field == "hvparams":
4294 338e51e8 Iustin Pop
          val = i_hv
4295 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
4296 5018a335 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
4297 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
4298 338e51e8 Iustin Pop
        elif field == "beparams":
4299 338e51e8 Iustin Pop
          val = i_be
4300 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
4301 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
4302 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
4303 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
4304 71c1af58 Iustin Pop
          # matches a variable list
4305 71c1af58 Iustin Pop
          st_groups = st_match.groups()
4306 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
4307 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4308 71c1af58 Iustin Pop
              val = len(instance.disks)
4309 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
4310 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
4311 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
4312 3e0cea06 Iustin Pop
              try:
4313 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
4314 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
4315 71c1af58 Iustin Pop
                val = None
4316 71c1af58 Iustin Pop
            else:
4317 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
4318 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
4319 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4320 71c1af58 Iustin Pop
              val = len(instance.nics)
4321 41a776da Iustin Pop
            elif st_groups[1] == "macs":
4322 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
4323 41a776da Iustin Pop
            elif st_groups[1] == "ips":
4324 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
4325 638c6349 Guido Trotter
            elif st_groups[1] == "modes":
4326 638c6349 Guido Trotter
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
4327 638c6349 Guido Trotter
            elif st_groups[1] == "links":
4328 638c6349 Guido Trotter
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
4329 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
4330 638c6349 Guido Trotter
              val = []
4331 638c6349 Guido Trotter
              for nicp in i_nicp:
4332 638c6349 Guido Trotter
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
4333 638c6349 Guido Trotter
                  val.append(nicp[constants.NIC_LINK])
4334 638c6349 Guido Trotter
                else:
4335 638c6349 Guido Trotter
                  val.append(None)
4336 71c1af58 Iustin Pop
            else:
4337 71c1af58 Iustin Pop
              # index-based item
4338 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
4339 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
4340 71c1af58 Iustin Pop
                val = None
4341 71c1af58 Iustin Pop
              else:
4342 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
4343 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
4344 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
4345 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
4346 638c6349 Guido Trotter
                elif st_groups[1] == "mode":
4347 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_MODE]
4348 638c6349 Guido Trotter
                elif st_groups[1] == "link":
4349 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_LINK]
4350 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
4351 638c6349 Guido Trotter
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
4352 638c6349 Guido Trotter
                  if nic_mode == constants.NIC_MODE_BRIDGED:
4353 638c6349 Guido Trotter
                    val = i_nicp[nic_idx][constants.NIC_LINK]
4354 638c6349 Guido Trotter
                  else:
4355 638c6349 Guido Trotter
                    val = None
4356 71c1af58 Iustin Pop
                else:
4357 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
4358 71c1af58 Iustin Pop
          else:
4359 c1ce76bb Iustin Pop
            assert False, ("Declared but unhandled variable parameter '%s'" %
4360 c1ce76bb Iustin Pop
                           field)
4361 a8083063 Iustin Pop
        else:
4362 c1ce76bb Iustin Pop
          assert False, "Declared but unhandled parameter '%s'" % field
4363 a8083063 Iustin Pop
        iout.append(val)
4364 a8083063 Iustin Pop
      output.append(iout)
4365 a8083063 Iustin Pop
4366 a8083063 Iustin Pop
    return output
4367 a8083063 Iustin Pop
4368 a8083063 Iustin Pop
4369 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
4370 a8083063 Iustin Pop
  """Failover an instance.
4371 a8083063 Iustin Pop

4372 a8083063 Iustin Pop
  """
4373 a8083063 Iustin Pop
  HPATH = "instance-failover"
4374 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4375 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
4376 c9e5c064 Guido Trotter
  REQ_BGL = False
4377 c9e5c064 Guido Trotter
4378 17c3f802 Guido Trotter
  def CheckArguments(self):
4379 17c3f802 Guido Trotter
    """Check the arguments.
4380 17c3f802 Guido Trotter

4381 17c3f802 Guido Trotter
    """
4382 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4383 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4384 17c3f802 Guido Trotter
4385 c9e5c064 Guido Trotter
  def ExpandNames(self):
4386 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
4387 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4388 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4389 c9e5c064 Guido Trotter
4390 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
4391 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
4392 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
4393 a8083063 Iustin Pop
4394 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4395 a8083063 Iustin Pop
    """Build hooks env.
4396 a8083063 Iustin Pop

4397 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4398 a8083063 Iustin Pop

4399 a8083063 Iustin Pop
    """
4400 a8083063 Iustin Pop
    env = {
4401 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
4402 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4403 a8083063 Iustin Pop
      }
4404 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4405 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
4406 a8083063 Iustin Pop
    return env, nl, nl
4407 a8083063 Iustin Pop
4408 a8083063 Iustin Pop
  def CheckPrereq(self):
4409 a8083063 Iustin Pop
    """Check prerequisites.
4410 a8083063 Iustin Pop

4411 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4412 a8083063 Iustin Pop

4413 a8083063 Iustin Pop
    """
4414 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4415 c9e5c064 Guido Trotter
    assert self.instance is not None, \
4416 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4417 a8083063 Iustin Pop
4418 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4419 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
4420 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
4421 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
4422 2a710df1 Michael Hanselmann
4423 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
4424 2a710df1 Michael Hanselmann
    if not secondary_nodes:
4425 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
4426 abdf0113 Iustin Pop
                                   "a mirrored disk template")
4427 2a710df1 Michael Hanselmann
4428 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
4429 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, target_node)
4430 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, target_node)
4431 d27776f0 Iustin Pop
    if instance.admin_up:
4432 d27776f0 Iustin Pop
      # check memory requirements on the secondary node
4433 d27776f0 Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
4434 d27776f0 Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
4435 d27776f0 Iustin Pop
                           instance.hypervisor)
4436 d27776f0 Iustin Pop
    else:
4437 d27776f0 Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
4438 d27776f0 Iustin Pop
                   " instance will not be started")
4439 3a7c308e Guido Trotter
4440 a8083063 Iustin Pop
    # check bridge existence
4441 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
4442 a8083063 Iustin Pop
4443 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4444 a8083063 Iustin Pop
    """Failover an instance.
4445 a8083063 Iustin Pop

4446 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
4447 a8083063 Iustin Pop
    starting it on the secondary.
4448 a8083063 Iustin Pop

4449 a8083063 Iustin Pop
    """
4450 a8083063 Iustin Pop
    instance = self.instance
4451 a8083063 Iustin Pop
4452 a8083063 Iustin Pop
    source_node = instance.primary_node
4453 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
4454 a8083063 Iustin Pop
4455 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
4456 a8083063 Iustin Pop
    for dev in instance.disks:
4457 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
4458 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
4459 0d68c45d Iustin Pop
        if instance.admin_up and not self.op.ignore_consistency:
4460 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
4461 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
4462 a8083063 Iustin Pop
4463 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
4464 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
4465 9a4f63d1 Iustin Pop
                 instance.name, source_node)
4466 a8083063 Iustin Pop
4467 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
4468 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4469 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4470 1fae010f Iustin Pop
    if msg:
4471 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
4472 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
4473 1fae010f Iustin Pop
                             " Proceeding anyway. Please make sure node"
4474 1fae010f Iustin Pop
                             " %s is down. Error details: %s",
4475 1fae010f Iustin Pop
                             instance.name, source_node, source_node, msg)
4476 24a40d57 Iustin Pop
      else:
4477 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4478 1fae010f Iustin Pop
                                 " node %s: %s" %
4479 1fae010f Iustin Pop
                                 (instance.name, source_node, msg))
4480 a8083063 Iustin Pop
4481 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
4482 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
4483 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
4484 a8083063 Iustin Pop
4485 a8083063 Iustin Pop
    instance.primary_node = target_node
4486 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
4487 b6102dab Guido Trotter
    self.cfg.Update(instance)
4488 a8083063 Iustin Pop
4489 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
4490 0d68c45d Iustin Pop
    if instance.admin_up:
4491 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
4492 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
4493 9a4f63d1 Iustin Pop
                   instance.name, target_node)
4494 12a0cfbe Guido Trotter
4495 7c4d6c7b Michael Hanselmann
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
4496 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
4497 12a0cfbe Guido Trotter
      if not disks_ok:
4498 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4499 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
4500 a8083063 Iustin Pop
4501 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
4502 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
4503 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4504 dd279568 Iustin Pop
      if msg:
4505 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4506 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
4507 dd279568 Iustin Pop
                                 (instance.name, target_node, msg))
4508 a8083063 Iustin Pop
4509 a8083063 Iustin Pop
4510 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
4511 53c776b5 Iustin Pop
  """Migrate an instance.
4512 53c776b5 Iustin Pop

4513 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
4514 53c776b5 Iustin Pop
  which is done with shutdown.
4515 53c776b5 Iustin Pop

4516 53c776b5 Iustin Pop
  """
4517 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
4518 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4519 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
4520 53c776b5 Iustin Pop
4521 53c776b5 Iustin Pop
  REQ_BGL = False
4522 53c776b5 Iustin Pop
4523 53c776b5 Iustin Pop
  def ExpandNames(self):
4524 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
4525 3e06e001 Michael Hanselmann
4526 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
4527 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
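    # the prerequisite checks and the migration itself are implemented by the
    # TLMigrateInstance tasklet; this LU only handles locking and hooks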
4528 53c776b5 Iustin Pop
4529 3e06e001 Michael Hanselmann
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
4530 3e06e001 Michael Hanselmann
                                       self.op.live, self.op.cleanup)
4531 3a012b41 Michael Hanselmann
    self.tasklets = [self._migrater]
4532 3e06e001 Michael Hanselmann
4533 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
4534 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
4535 53c776b5 Iustin Pop
      self._LockInstancesNodes()
4536 53c776b5 Iustin Pop
4537 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
4538 53c776b5 Iustin Pop
    """Build hooks env.
4539 53c776b5 Iustin Pop

4540 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4541 53c776b5 Iustin Pop

4542 53c776b5 Iustin Pop
    """
4543 3e06e001 Michael Hanselmann
    instance = self._migrater.instance
4544 3e06e001 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self, instance)
4545 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
4546 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
4547 3e06e001 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
4548 53c776b5 Iustin Pop
    return env, nl, nl
4549 53c776b5 Iustin Pop
4550 3e06e001 Michael Hanselmann
4551 313bcead Iustin Pop
class LUMoveInstance(LogicalUnit):
4552 313bcead Iustin Pop
  """Move an instance by data-copying.
4553 313bcead Iustin Pop

4554 313bcead Iustin Pop
  """
4555 313bcead Iustin Pop
  HPATH = "instance-move"
4556 313bcead Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4557 313bcead Iustin Pop
  _OP_REQP = ["instance_name", "target_node"]
4558 313bcead Iustin Pop
  REQ_BGL = False
4559 313bcead Iustin Pop
4560 17c3f802 Guido Trotter
  def CheckArguments(self):
4561 17c3f802 Guido Trotter
    """Check the arguments.
4562 17c3f802 Guido Trotter

4563 17c3f802 Guido Trotter
    """
4564 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4565 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4566 17c3f802 Guido Trotter
4567 313bcead Iustin Pop
  def ExpandNames(self):
4568 313bcead Iustin Pop
    self._ExpandAndLockInstance()
4569 313bcead Iustin Pop
    target_node = self.cfg.ExpandNodeName(self.op.target_node)
4570 313bcead Iustin Pop
    if target_node is None:
4571 313bcead Iustin Pop
      raise errors.OpPrereqError("Node '%s' not known" %
4572 313bcead Iustin Pop
                                  self.op.target_node)
4573 313bcead Iustin Pop
    self.op.target_node = target_node
4574 313bcead Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
4575 313bcead Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4576 313bcead Iustin Pop
4577 313bcead Iustin Pop
  def DeclareLocks(self, level):
4578 313bcead Iustin Pop
    if level == locking.LEVEL_NODE:
4579 313bcead Iustin Pop
      self._LockInstancesNodes(primary_only=True)
4580 313bcead Iustin Pop
4581 313bcead Iustin Pop
  def BuildHooksEnv(self):
4582 313bcead Iustin Pop
    """Build hooks env.
4583 313bcead Iustin Pop

4584 313bcead Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4585 313bcead Iustin Pop

4586 313bcead Iustin Pop
    """
4587 313bcead Iustin Pop
    env = {
4588 313bcead Iustin Pop
      "TARGET_NODE": self.op.target_node,
4589 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4590 313bcead Iustin Pop
      }
4591 313bcead Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4592 313bcead Iustin Pop
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
4593 313bcead Iustin Pop
                                       self.op.target_node]
4594 313bcead Iustin Pop
    return env, nl, nl
4595 313bcead Iustin Pop
4596 313bcead Iustin Pop
  def CheckPrereq(self):
4597 313bcead Iustin Pop
    """Check prerequisites.
4598 313bcead Iustin Pop

4599 313bcead Iustin Pop
    This checks that the instance is in the cluster.
4600 313bcead Iustin Pop

4601 313bcead Iustin Pop
    """
4602 313bcead Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4603 313bcead Iustin Pop
    assert self.instance is not None, \
4604 313bcead Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
4605 313bcead Iustin Pop
4606 313bcead Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.target_node)
4607 313bcead Iustin Pop
    assert node is not None, \
4608 313bcead Iustin Pop
      "Cannot retrieve locked node %s" % self.op.target_node
4609 313bcead Iustin Pop
4610 313bcead Iustin Pop
    self.target_node = target_node = node.name
4611 313bcead Iustin Pop
4612 313bcead Iustin Pop
    if target_node == instance.primary_node:
4613 313bcead Iustin Pop
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
4614 313bcead Iustin Pop
                                 (instance.name, target_node))
4615 313bcead Iustin Pop
4616 313bcead Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4617 313bcead Iustin Pop
4618 313bcead Iustin Pop
    for idx, dsk in enumerate(instance.disks):
4619 313bcead Iustin Pop
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
4620 313bcead Iustin Pop
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
4621 313bcead Iustin Pop
                                   " cannot copy")
4622 313bcead Iustin Pop
4623 313bcead Iustin Pop
    _CheckNodeOnline(self, target_node)
4624 313bcead Iustin Pop
    _CheckNodeNotDrained(self, target_node)
4625 313bcead Iustin Pop
4626 313bcead Iustin Pop
    if instance.admin_up:
4627 313bcead Iustin Pop
      # check memory requirements on the target node
4628 313bcead Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "moving instance %s" %
4629 313bcead Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
4630 313bcead Iustin Pop
                           instance.hypervisor)
4631 313bcead Iustin Pop
    else:
4632 313bcead Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
4633 313bcead Iustin Pop
                   " instance will not be started")
4634 313bcead Iustin Pop
4635 313bcead Iustin Pop
    # check bridge existence
4636 313bcead Iustin Pop
    _CheckInstanceBridgesExist(self, instance, node=target_node)
4637 313bcead Iustin Pop
4638 313bcead Iustin Pop
  def Exec(self, feedback_fn):
4639 313bcead Iustin Pop
    """Move an instance.
4640 313bcead Iustin Pop

4641 313bcead Iustin Pop
    The move is done by shutting it down on its present node, copying
4642 313bcead Iustin Pop
    the data over (slow) and starting it on the new node.
4643 313bcead Iustin Pop

4644 313bcead Iustin Pop
    """
4645 313bcead Iustin Pop
    instance = self.instance
4646 313bcead Iustin Pop
4647 313bcead Iustin Pop
    source_node = instance.primary_node
4648 313bcead Iustin Pop
    target_node = self.target_node
4649 313bcead Iustin Pop
4650 313bcead Iustin Pop
    self.LogInfo("Shutting down instance %s on source node %s",
4651 313bcead Iustin Pop
                 instance.name, source_node)
4652 313bcead Iustin Pop
4653 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
4654 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4655 313bcead Iustin Pop
    msg = result.fail_msg
4656 313bcead Iustin Pop
    if msg:
4657 313bcead Iustin Pop
      if getattr(self.op, "ignore_consistency", False):
4658 313bcead Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
4659 313bcead Iustin Pop
                             " Proceeding anyway. Please make sure node"
4660 313bcead Iustin Pop
                             " %s is down. Error details: %s",
4661 313bcead Iustin Pop
                             instance.name, source_node, source_node, msg)
4662 313bcead Iustin Pop
      else:
4663 313bcead Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4664 313bcead Iustin Pop
                                 " node %s: %s" %
4665 313bcead Iustin Pop
                                 (instance.name, source_node, msg))
4666 313bcead Iustin Pop
4667 313bcead Iustin Pop
    # create the target disks
4668 313bcead Iustin Pop
    try:
4669 313bcead Iustin Pop
      _CreateDisks(self, instance, target_node=target_node)
4670 313bcead Iustin Pop
    except errors.OpExecError:
4671 313bcead Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
4672 313bcead Iustin Pop
      try:
4673 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
4674 313bcead Iustin Pop
      finally:
4675 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4676 313bcead Iustin Pop
        raise
4677 313bcead Iustin Pop
4678 313bcead Iustin Pop
    cluster_name = self.cfg.GetClusterInfo().cluster_name
4679 313bcead Iustin Pop
4680 313bcead Iustin Pop
    errs = []
4681 313bcead Iustin Pop
    # activate, get path, copy the data over
4682 313bcead Iustin Pop
    for idx, disk in enumerate(instance.disks):
4683 313bcead Iustin Pop
      self.LogInfo("Copying data for disk %d", idx)
4684 313bcead Iustin Pop
      result = self.rpc.call_blockdev_assemble(target_node, disk,
4685 313bcead Iustin Pop
                                               instance.name, True)
4686 313bcead Iustin Pop
      if result.fail_msg:
4687 313bcead Iustin Pop
        self.LogWarning("Can't assemble newly created disk %d: %s",
4688 313bcead Iustin Pop
                        idx, result.fail_msg)
4689 313bcead Iustin Pop
        errs.append(result.fail_msg)
4690 313bcead Iustin Pop
        break
4691 313bcead Iustin Pop
      dev_path = result.payload
4692 313bcead Iustin Pop
      result = self.rpc.call_blockdev_export(source_node, disk,
4693 313bcead Iustin Pop
                                             target_node, dev_path,
4694 313bcead Iustin Pop
                                             cluster_name)
4695 313bcead Iustin Pop
      if result.fail_msg:
4696 313bcead Iustin Pop
        self.LogWarning("Can't copy data over for disk %d: %s",
4697 313bcead Iustin Pop
                        idx, result.fail_msg)
4698 313bcead Iustin Pop
        errs.append(result.fail_msg)
4699 313bcead Iustin Pop
        break
4700 313bcead Iustin Pop
4701 313bcead Iustin Pop
    if errs:
4702 313bcead Iustin Pop
      self.LogWarning("Some disks failed to copy, aborting")
4703 313bcead Iustin Pop
      try:
4704 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
4705 313bcead Iustin Pop
      finally:
4706 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4707 313bcead Iustin Pop
        raise errors.OpExecError("Errors during disk copy: %s" %
4708 313bcead Iustin Pop
                                 (",".join(errs),))
4709 313bcead Iustin Pop
4710 313bcead Iustin Pop
    instance.primary_node = target_node
4711 313bcead Iustin Pop
    self.cfg.Update(instance)
4712 313bcead Iustin Pop
4713 313bcead Iustin Pop
    self.LogInfo("Removing the disks on the original node")
4714 313bcead Iustin Pop
    _RemoveDisks(self, instance, target_node=source_node)
4715 313bcead Iustin Pop
4716 313bcead Iustin Pop
    # Only start the instance if it's marked as up
4717 313bcead Iustin Pop
    if instance.admin_up:
4718 313bcead Iustin Pop
      self.LogInfo("Starting instance %s on node %s",
4719 313bcead Iustin Pop
                   instance.name, target_node)
4720 313bcead Iustin Pop
4721 313bcead Iustin Pop
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
4722 313bcead Iustin Pop
                                           ignore_secondaries=True)
4723 313bcead Iustin Pop
      if not disks_ok:
4724 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4725 313bcead Iustin Pop
        raise errors.OpExecError("Can't activate the instance's disks")
4726 313bcead Iustin Pop
4727 313bcead Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
4728 313bcead Iustin Pop
      msg = result.fail_msg
4729 313bcead Iustin Pop
      if msg:
4730 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4731 313bcead Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
4732 313bcead Iustin Pop
                                 (instance.name, target_node, msg))
4733 313bcead Iustin Pop
4734 313bcead Iustin Pop
4735 80cb875c Michael Hanselmann
class LUMigrateNode(LogicalUnit):
4736 80cb875c Michael Hanselmann
  """Migrate all instances from a node.
4737 80cb875c Michael Hanselmann

4738 80cb875c Michael Hanselmann
  """
4739 80cb875c Michael Hanselmann
  HPATH = "node-migrate"
4740 80cb875c Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
4741 80cb875c Michael Hanselmann
  _OP_REQP = ["node_name", "live"]
4742 80cb875c Michael Hanselmann
  REQ_BGL = False
4743 80cb875c Michael Hanselmann
4744 80cb875c Michael Hanselmann
  def ExpandNames(self):
4745 80cb875c Michael Hanselmann
    self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name)
4746 80cb875c Michael Hanselmann
    if self.op.node_name is None:
4747 80cb875c Michael Hanselmann
      raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name)
4748 80cb875c Michael Hanselmann
4749 80cb875c Michael Hanselmann
    self.needed_locks = {
4750 80cb875c Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
4751 80cb875c Michael Hanselmann
      }
4752 80cb875c Michael Hanselmann
4753 80cb875c Michael Hanselmann
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4754 80cb875c Michael Hanselmann
4755 80cb875c Michael Hanselmann
    # Create tasklets for migrating all primary instances on this node
4756 80cb875c Michael Hanselmann
    names = []
4757 80cb875c Michael Hanselmann
    tasklets = []
4758 80cb875c Michael Hanselmann
4759 80cb875c Michael Hanselmann
    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
4760 80cb875c Michael Hanselmann
      logging.debug("Migrating instance %s", inst.name)
4761 80cb875c Michael Hanselmann
      names.append(inst.name)
4762 80cb875c Michael Hanselmann
4763 80cb875c Michael Hanselmann
      tasklets.append(TLMigrateInstance(self, inst.name, self.op.live, False))
4764 80cb875c Michael Hanselmann
4765 80cb875c Michael Hanselmann
    self.tasklets = tasklets
4766 80cb875c Michael Hanselmann
4767 80cb875c Michael Hanselmann
    # Declare instance locks
4768 80cb875c Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = names
4769 80cb875c Michael Hanselmann
4770 80cb875c Michael Hanselmann
  def DeclareLocks(self, level):
4771 80cb875c Michael Hanselmann
    if level == locking.LEVEL_NODE:
4772 80cb875c Michael Hanselmann
      self._LockInstancesNodes()
4773 80cb875c Michael Hanselmann
4774 80cb875c Michael Hanselmann
  def BuildHooksEnv(self):
4775 80cb875c Michael Hanselmann
    """Build hooks env.
4776 80cb875c Michael Hanselmann

4777 80cb875c Michael Hanselmann
    This runs on the master only.
4778 80cb875c Michael Hanselmann

4779 80cb875c Michael Hanselmann
    """
4780 80cb875c Michael Hanselmann
    env = {
4781 80cb875c Michael Hanselmann
      "NODE_NAME": self.op.node_name,
4782 80cb875c Michael Hanselmann
      }
4783 80cb875c Michael Hanselmann
4784 80cb875c Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
4785 80cb875c Michael Hanselmann
4786 80cb875c Michael Hanselmann
    return (env, nl, nl)
4787 80cb875c Michael Hanselmann
4788 80cb875c Michael Hanselmann
4789 3e06e001 Michael Hanselmann
class TLMigrateInstance(Tasklet):
4790 3e06e001 Michael Hanselmann
  def __init__(self, lu, instance_name, live, cleanup):
4791 3e06e001 Michael Hanselmann
    """Initializes this class.
4792 3e06e001 Michael Hanselmann

4793 3e06e001 Michael Hanselmann
    """
4794 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
4795 464243a7 Michael Hanselmann
4796 3e06e001 Michael Hanselmann
    # Parameters
4797 3e06e001 Michael Hanselmann
    self.instance_name = instance_name
4798 3e06e001 Michael Hanselmann
    self.live = live
4799 3e06e001 Michael Hanselmann
    self.cleanup = cleanup
4800 3e06e001 Michael Hanselmann
4801 53c776b5 Iustin Pop
  def CheckPrereq(self):
4802 53c776b5 Iustin Pop
    """Check prerequisites.
4803 53c776b5 Iustin Pop

4804 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
4805 53c776b5 Iustin Pop

4806 53c776b5 Iustin Pop
    """
4807 53c776b5 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
4808 3e06e001 Michael Hanselmann
      self.cfg.ExpandInstanceName(self.instance_name))
4809 53c776b5 Iustin Pop
    if instance is None:
4810 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
4811 3e06e001 Michael Hanselmann
                                 self.instance_name)
4812 53c776b5 Iustin Pop
4813 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
4814 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
4815 53c776b5 Iustin Pop
                                 " drbd8, cannot migrate.")
4816 53c776b5 Iustin Pop
4817 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
4818 53c776b5 Iustin Pop
    if not secondary_nodes:
4819 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
4820 733a2b6a Iustin Pop
                                      " drbd8 disk template")
4821 53c776b5 Iustin Pop
4822 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
4823 53c776b5 Iustin Pop
4824 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
4825 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
4826 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
4827 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
4828 53c776b5 Iustin Pop
                         instance.hypervisor)
4829 53c776b5 Iustin Pop
4830 53c776b5 Iustin Pop
    # check bridge existence
4831 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
4832 53c776b5 Iustin Pop
4833 3e06e001 Michael Hanselmann
    if not self.cleanup:
4834 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
4835 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
4836 53c776b5 Iustin Pop
                                                 instance)
4837 4c4e4e1e Iustin Pop
      result.Raise("Can't migrate, please use failover", prereq=True)
4838 53c776b5 Iustin Pop
4839 53c776b5 Iustin Pop
    self.instance = instance
4840 53c776b5 Iustin Pop
4841 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
4842 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
4843 53c776b5 Iustin Pop

4844 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
4845 53c776b5 Iustin Pop

4846 53c776b5 Iustin Pop
    """
4847 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
4848 53c776b5 Iustin Pop
    all_done = False
4849 53c776b5 Iustin Pop
    while not all_done:
4850 53c776b5 Iustin Pop
      all_done = True
4851 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
4852 53c776b5 Iustin Pop
                                            self.nodes_ip,
4853 53c776b5 Iustin Pop
                                            self.instance.disks)
4854 53c776b5 Iustin Pop
      min_percent = 100
4855 53c776b5 Iustin Pop
      for node, nres in result.items():
4856 4c4e4e1e Iustin Pop
        nres.Raise("Cannot resync disks on node %s" % node)
4857 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
4858 53c776b5 Iustin Pop
        all_done = all_done and node_done
4859 53c776b5 Iustin Pop
        if node_percent is not None:
4860 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
4861 53c776b5 Iustin Pop
      if not all_done:
4862 53c776b5 Iustin Pop
        if min_percent < 100:
4863 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
4864 53c776b5 Iustin Pop
        time.sleep(2)
4865 53c776b5 Iustin Pop
4866 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
4867 53c776b5 Iustin Pop
    """Demote a node to secondary.
4868 53c776b5 Iustin Pop

4869 53c776b5 Iustin Pop
    """
4870 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
4871 53c776b5 Iustin Pop
4872 53c776b5 Iustin Pop
    for dev in self.instance.disks:
4873 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
4874 53c776b5 Iustin Pop
4875 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
4876 53c776b5 Iustin Pop
                                          self.instance.disks)
4877 4c4e4e1e Iustin Pop
    result.Raise("Cannot change disk to secondary on node %s" % node)
4878 53c776b5 Iustin Pop
4879 53c776b5 Iustin Pop
  def _GoStandalone(self):
4880 53c776b5 Iustin Pop
    """Disconnect from the network.
4881 53c776b5 Iustin Pop

4882 53c776b5 Iustin Pop
    """
4883 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
4884 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
4885 53c776b5 Iustin Pop
                                               self.instance.disks)
4886 53c776b5 Iustin Pop
    for node, nres in result.items():
4887 4c4e4e1e Iustin Pop
      nres.Raise("Cannot disconnect disks node %s" % node)
4888 53c776b5 Iustin Pop
4889 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
4890 53c776b5 Iustin Pop
    """Reconnect to the network.
4891 53c776b5 Iustin Pop

4892 53c776b5 Iustin Pop
    """
4893 53c776b5 Iustin Pop
    if multimaster:
4894 53c776b5 Iustin Pop
      msg = "dual-master"
4895 53c776b5 Iustin Pop
    else:
4896 53c776b5 Iustin Pop
      msg = "single-master"
4897 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
4898 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
4899 53c776b5 Iustin Pop
                                           self.instance.disks,
4900 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
4901 53c776b5 Iustin Pop
    for node, nres in result.items():
4902 4c4e4e1e Iustin Pop
      nres.Raise("Cannot change disks config on node %s" % node)
4903 53c776b5 Iustin Pop
4904 53c776b5 Iustin Pop
  def _ExecCleanup(self):
4905 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
4906 53c776b5 Iustin Pop

4907 53c776b5 Iustin Pop
    The cleanup is done by:
4908 53c776b5 Iustin Pop
      - check that the instance is running only on one node
4909 53c776b5 Iustin Pop
        (and update the config if needed)
4910 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
4911 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
4912 53c776b5 Iustin Pop
      - disconnect from the network
4913 53c776b5 Iustin Pop
      - change disks into single-master mode
4914 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
4915 53c776b5 Iustin Pop

4916 53c776b5 Iustin Pop
    """
4917 53c776b5 Iustin Pop
    instance = self.instance
4918 53c776b5 Iustin Pop
    target_node = self.target_node
4919 53c776b5 Iustin Pop
    source_node = self.source_node
4920 53c776b5 Iustin Pop
4921 53c776b5 Iustin Pop
    # check running on only one node
4922 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
4923 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
4924 53c776b5 Iustin Pop
                     " a bad state)")
4925 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
4926 53c776b5 Iustin Pop
    for node, result in ins_l.items():
4927 4c4e4e1e Iustin Pop
      result.Raise("Can't contact node %s" % node)
4928 53c776b5 Iustin Pop
4929 aca13712 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].payload
4930 aca13712 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].payload
4931 53c776b5 Iustin Pop
4932 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
4933 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
4934 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
4935 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
4936 53c776b5 Iustin Pop
                               " and restart this operation.")
4937 53c776b5 Iustin Pop
4938 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
4939 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
4940 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
4941 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
4942 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
4943 53c776b5 Iustin Pop
4944 53c776b5 Iustin Pop
    if runningon_target:
4945 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
4946 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
4947 53c776b5 Iustin Pop
                       " updating config" % target_node)
4948 53c776b5 Iustin Pop
      instance.primary_node = target_node
4949 53c776b5 Iustin Pop
      self.cfg.Update(instance)
4950 53c776b5 Iustin Pop
      demoted_node = source_node
4951 53c776b5 Iustin Pop
    else:
4952 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
4953 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
4954 53c776b5 Iustin Pop
      demoted_node = target_node
4955 53c776b5 Iustin Pop
4956 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
4957 53c776b5 Iustin Pop
    try:
4958 53c776b5 Iustin Pop
      self._WaitUntilSync()
4959 53c776b5 Iustin Pop
    except errors.OpExecError:
4960 53c776b5 Iustin Pop
      # we ignore here errors, since if the device is standalone, it
4961 53c776b5 Iustin Pop
      # won't be able to sync
4962 53c776b5 Iustin Pop
      pass
4963 53c776b5 Iustin Pop
    self._GoStandalone()
4964 53c776b5 Iustin Pop
    self._GoReconnect(False)
4965 53c776b5 Iustin Pop
    self._WaitUntilSync()
4966 53c776b5 Iustin Pop
4967 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4968 53c776b5 Iustin Pop
4969 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
4970 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
4971 6906a9d8 Guido Trotter

4972 6906a9d8 Guido Trotter
    """
4973 6906a9d8 Guido Trotter
    target_node = self.target_node
4974 6906a9d8 Guido Trotter
    try:
4975 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
4976 6906a9d8 Guido Trotter
      self._GoStandalone()
4977 6906a9d8 Guido Trotter
      self._GoReconnect(False)
4978 6906a9d8 Guido Trotter
      self._WaitUntilSync()
4979 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
4980 3e06e001 Michael Hanselmann
      self.lu.LogWarning("Migration failed and I can't reconnect the"
4981 3e06e001 Michael Hanselmann
                         " drives: error '%s'\n"
4982 3e06e001 Michael Hanselmann
                         "Please look and recover the instance status" %
4983 3e06e001 Michael Hanselmann
                         str(err))
4984 6906a9d8 Guido Trotter
4985 6906a9d8 Guido Trotter
  def _AbortMigration(self):
4986 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
4987 6906a9d8 Guido Trotter

4988 6906a9d8 Guido Trotter
    """
4989 6906a9d8 Guido Trotter
    instance = self.instance
4990 6906a9d8 Guido Trotter
    target_node = self.target_node
4991 6906a9d8 Guido Trotter
    migration_info = self.migration_info
4992 6906a9d8 Guido Trotter
4993 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
4994 6906a9d8 Guido Trotter
                                                    instance,
4995 6906a9d8 Guido Trotter
                                                    migration_info,
4996 6906a9d8 Guido Trotter
                                                    False)
4997 4c4e4e1e Iustin Pop
    abort_msg = abort_result.fail_msg
4998 6906a9d8 Guido Trotter
    if abort_msg:
4999 6906a9d8 Guido Trotter
      logging.error("Aborting migration failed on target node %s: %s" %
5000 6906a9d8 Guido Trotter
                    (target_node, abort_msg))
5001 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we stil have to try to revert the
5002 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
5003 6906a9d8 Guido Trotter
5004 53c776b5 Iustin Pop
  def _ExecMigration(self):
5005 53c776b5 Iustin Pop
    """Migrate an instance.
5006 53c776b5 Iustin Pop

5007 53c776b5 Iustin Pop
    The migrate is done by:
5008 53c776b5 Iustin Pop
      - change the disks into dual-master mode
5009 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
5010 53c776b5 Iustin Pop
      - migrate the instance
5011 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
5012 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
5013 53c776b5 Iustin Pop
      - change disks into single-master mode
5014 53c776b5 Iustin Pop

5015 53c776b5 Iustin Pop
    """
5016 53c776b5 Iustin Pop
    instance = self.instance
5017 53c776b5 Iustin Pop
    target_node = self.target_node
5018 53c776b5 Iustin Pop
    source_node = self.source_node
5019 53c776b5 Iustin Pop
5020 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
5021 53c776b5 Iustin Pop
    for dev in instance.disks:
5022 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
5023 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
5024 53c776b5 Iustin Pop
                                 " synchronized on target node,"
5025 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
5026 53c776b5 Iustin Pop
5027 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
5028 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
5029 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5030 6906a9d8 Guido Trotter
    if msg:
5031 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
5032 0959c824 Iustin Pop
                 (source_node, msg))
5033 6906a9d8 Guido Trotter
      logging.error(log_err)
5034 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
5035 6906a9d8 Guido Trotter
5036 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
5037 6906a9d8 Guido Trotter
5038 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
5039 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
5040 53c776b5 Iustin Pop
    self._GoStandalone()
5041 53c776b5 Iustin Pop
    self._GoReconnect(True)
5042 53c776b5 Iustin Pop
    self._WaitUntilSync()
5043 53c776b5 Iustin Pop
5044 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
5045 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
5046 6906a9d8 Guido Trotter
                                           instance,
5047 6906a9d8 Guido Trotter
                                           migration_info,
5048 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
5049 6906a9d8 Guido Trotter
5050 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5051 6906a9d8 Guido Trotter
    if msg:
5052 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
5053 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
5054 6906a9d8 Guido Trotter
      self._AbortMigration()
5055 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5056 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
5057 6906a9d8 Guido Trotter
                               (instance.name, msg))
5058 6906a9d8 Guido Trotter
5059 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
5060 53c776b5 Iustin Pop
    time.sleep(10)
5061 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
5062 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
5063 3e06e001 Michael Hanselmann
                                            self.live)
5064 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5065 53c776b5 Iustin Pop
    if msg:
5066 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
5067 53c776b5 Iustin Pop
                    " disk status: %s", msg)
5068 6906a9d8 Guido Trotter
      self._AbortMigration()
5069 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5070 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
5071 53c776b5 Iustin Pop
                               (instance.name, msg))
5072 53c776b5 Iustin Pop
    time.sleep(10)
5073 53c776b5 Iustin Pop
5074 53c776b5 Iustin Pop
    instance.primary_node = target_node
5075 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
5076 53c776b5 Iustin Pop
    self.cfg.Update(instance)
5077 53c776b5 Iustin Pop
5078 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
5079 6906a9d8 Guido Trotter
                                              instance,
5080 6906a9d8 Guido Trotter
                                              migration_info,
5081 6906a9d8 Guido Trotter
                                              True)
5082 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5083 6906a9d8 Guido Trotter
    if msg:
5084 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
5085 6906a9d8 Guido Trotter
                    " %s" % msg)
5086 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
5087 6906a9d8 Guido Trotter
                               msg)
5088 6906a9d8 Guido Trotter
5089 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
5090 53c776b5 Iustin Pop
    self._WaitUntilSync()
5091 53c776b5 Iustin Pop
    self._GoStandalone()
5092 53c776b5 Iustin Pop
    self._GoReconnect(False)
5093 53c776b5 Iustin Pop
    self._WaitUntilSync()
5094 53c776b5 Iustin Pop
5095 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5096 53c776b5 Iustin Pop
5097 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
5098 53c776b5 Iustin Pop
    """Perform the migration.
5099 53c776b5 Iustin Pop

5100 53c776b5 Iustin Pop
    """
5101 80cb875c Michael Hanselmann
    feedback_fn("Migrating instance %s" % self.instance.name)
5102 80cb875c Michael Hanselmann
5103 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
5104 53c776b5 Iustin Pop
5105 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
5106 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
5107 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
5108 53c776b5 Iustin Pop
    self.nodes_ip = {
5109 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
5110 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
5111 53c776b5 Iustin Pop
      }
5112 3e06e001 Michael Hanselmann
5113 3e06e001 Michael Hanselmann
    if self.cleanup:
5114 53c776b5 Iustin Pop
      return self._ExecCleanup()
5115 53c776b5 Iustin Pop
    else:
5116 53c776b5 Iustin Pop
      return self._ExecMigration()
5117 53c776b5 Iustin Pop
5118 53c776b5 Iustin Pop
5119 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
5120 428958aa Iustin Pop
                    info, force_open):
5121 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
5122 a8083063 Iustin Pop

5123 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
5124 a8083063 Iustin Pop
  all its children.
5125 a8083063 Iustin Pop

5126 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
5127 a8083063 Iustin Pop

5128 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
5129 428958aa Iustin Pop
  @param node: the node on which to create the device
5130 428958aa Iustin Pop
  @type instance: L{objects.Instance}
5131 428958aa Iustin Pop
  @param instance: the instance which owns the device
5132 428958aa Iustin Pop
  @type device: L{objects.Disk}
5133 428958aa Iustin Pop
  @param device: the device to create
5134 428958aa Iustin Pop
  @type force_create: boolean
5135 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
5136 428958aa Iustin Pop
      will be change to True whenever we find a device which has
5137 428958aa Iustin Pop
      CreateOnSecondary() attribute
5138 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5139 428958aa Iustin Pop
      (this will be represented as a LVM tag)
5140 428958aa Iustin Pop
  @type force_open: boolean
5141 428958aa Iustin Pop
  @param force_open: this parameter will be passes to the
5142 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5143 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
5144 428958aa Iustin Pop
      the child assembly and the device own Open() execution
5145 428958aa Iustin Pop

5146 a8083063 Iustin Pop
  """
5147 a8083063 Iustin Pop
  if device.CreateOnSecondary():
5148 428958aa Iustin Pop
    force_create = True
5149 796cab27 Iustin Pop
5150 a8083063 Iustin Pop
  if device.children:
5151 a8083063 Iustin Pop
    for child in device.children:
5152 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
5153 428958aa Iustin Pop
                      info, force_open)
5154 a8083063 Iustin Pop
5155 428958aa Iustin Pop
  if not force_create:
5156 796cab27 Iustin Pop
    return
5157 796cab27 Iustin Pop
5158 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
5159 de12473a Iustin Pop
5160 de12473a Iustin Pop
5161 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
5162 de12473a Iustin Pop
  """Create a single block device on a given node.
5163 de12473a Iustin Pop

5164 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
5165 de12473a Iustin Pop
  created in advance.
5166 de12473a Iustin Pop

5167 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
5168 de12473a Iustin Pop
  @param node: the node on which to create the device
5169 de12473a Iustin Pop
  @type instance: L{objects.Instance}
5170 de12473a Iustin Pop
  @param instance: the instance which owns the device
5171 de12473a Iustin Pop
  @type device: L{objects.Disk}
5172 de12473a Iustin Pop
  @param device: the device to create
5173 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5174 de12473a Iustin Pop
      (this will be represented as a LVM tag)
5175 de12473a Iustin Pop
  @type force_open: boolean
5176 de12473a Iustin Pop
  @param force_open: this parameter will be passed to the
5177 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5178 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
5179 de12473a Iustin Pop
      the child assembly and the device's own Open() execution
5180 de12473a Iustin Pop

5181 de12473a Iustin Pop
  """
5182 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
5183 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
5184 428958aa Iustin Pop
                                       instance.name, force_open, info)
5185 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
5186 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
5187 a8083063 Iustin Pop
  if device.physical_id is None:
5188 0959c824 Iustin Pop
    device.physical_id = result.payload
5189 a8083063 Iustin Pop
5190 a8083063 Iustin Pop
5191 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
5192 923b1523 Iustin Pop
  """Generate a suitable LV name.
5193 923b1523 Iustin Pop

5194 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
5195 923b1523 Iustin Pop

5196 923b1523 Iustin Pop
  """
5197 923b1523 Iustin Pop
  results = []
5198 923b1523 Iustin Pop
  for val in exts:
5199 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
5200 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
5201 923b1523 Iustin Pop
  return results
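
# Example (hedged; the exact id format depends on cfg.GenerateUniqueID, and a
# fresh id is generated per extension):
#
#   _GenerateUniqueNames(lu, [".disk0_data", ".disk0_meta"])
#   # -> ["<id1>.disk0_data", "<id2>.disk0_meta"]
#
# i.e. one logical volume name per requested extension.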
5202 923b1523 Iustin Pop
5203 923b1523 Iustin Pop
5204 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
5205 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
5206 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
5207 a1f445d3 Iustin Pop

5208 a1f445d3 Iustin Pop
  """
5209 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
5210 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5211 b9bddb6b Iustin Pop
  shared_secret = lu.cfg.GenerateDRBDSecret()
5212 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5213 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
5214 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5215 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
5216 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
5217 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
5218 f9518d38 Iustin Pop
                                      p_minor, s_minor,
5219 f9518d38 Iustin Pop
                                      shared_secret),
5220 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
5221 a1f445d3 Iustin Pop
                          iv_name=iv_name)
5222 a1f445d3 Iustin Pop
  return drbd_dev
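
# Illustrative sketch (hypothetical values, not part of the original module):
# for a 1024 MB disk, _GenerateDRBD8Branch returns a tree along these lines:
#
#   Disk(LD_DRBD8, size=1024,
#        logical_id=(primary, secondary, port, p_minor, s_minor, secret),
#        children=[Disk(LD_LV, size=1024, logical_id=(vg, names[0])),   # data
#                  Disk(LD_LV, size=128,  logical_id=(vg, names[1]))])  # meta
#
# i.e. a 128 MB metadata LV is always allocated next to the data LV.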
5223 a1f445d3 Iustin Pop
5224 7c0d6283 Michael Hanselmann
5225 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
5226 a8083063 Iustin Pop
                          instance_name, primary_node,
5227 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
5228 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
5229 e2a65344 Iustin Pop
                          base_index):
5230 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
5231 a8083063 Iustin Pop

5232 a8083063 Iustin Pop
  """
5233 a8083063 Iustin Pop
  #TODO: compute space requirements
5234 a8083063 Iustin Pop
5235 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5236 08db7c5c Iustin Pop
  disk_count = len(disk_info)
5237 08db7c5c Iustin Pop
  disks = []
5238 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
5239 08db7c5c Iustin Pop
    pass
5240 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
5241 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
5242 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5243 923b1523 Iustin Pop
5244 fb4b324b Guido Trotter
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5245 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
5246 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5247 e2a65344 Iustin Pop
      disk_index = idx + base_index
5248 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
5249 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
5250 6ec66eae Iustin Pop
                              iv_name="disk/%d" % disk_index,
5251 6ec66eae Iustin Pop
                              mode=disk["mode"])
5252 08db7c5c Iustin Pop
      disks.append(disk_dev)
5253 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
5254 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
5255 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5256 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
5257 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
5258 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
5259 08db7c5c Iustin Pop
5260 e6c1ff2f Iustin Pop
    names = []
5261 fb4b324b Guido Trotter
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5262 e6c1ff2f Iustin Pop
                                               for i in range(disk_count)]):
5263 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_data")
5264 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_meta")
5265 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5266 112050d9 Iustin Pop
      disk_index = idx + base_index
5267 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
5268 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
5269 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
5270 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
5271 6ec66eae Iustin Pop
      disk_dev.mode = disk["mode"]
5272 08db7c5c Iustin Pop
      disks.append(disk_dev)
5273 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
5274 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
5275 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
5276 0f1a06e3 Manuel Franceschini
5277 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5278 112050d9 Iustin Pop
      disk_index = idx + base_index
5279 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
5280 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
5281 08db7c5c Iustin Pop
                              logical_id=(file_driver,
5282 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
5283 43e99cff Guido Trotter
                                                         disk_index)),
5284 6ec66eae Iustin Pop
                              mode=disk["mode"])
5285 08db7c5c Iustin Pop
      disks.append(disk_dev)
5286 a8083063 Iustin Pop
  else:
5287 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
5288 a8083063 Iustin Pop
  return disks
5289 a8083063 Iustin Pop
5290 a8083063 Iustin Pop
5291 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
5292 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
5293 3ecf6786 Iustin Pop

5294 3ecf6786 Iustin Pop
  """
5295 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
5296 a0c3fea1 Michael Hanselmann
5297 a0c3fea1 Michael Hanselmann
5298 621b7678 Iustin Pop
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
5299 a8083063 Iustin Pop
  """Create all disks for an instance.
5300 a8083063 Iustin Pop

5301 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
5302 a8083063 Iustin Pop

5303 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5304 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5305 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5306 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
5307 bd315bfa Iustin Pop
  @type to_skip: list
5308 bd315bfa Iustin Pop
  @param to_skip: list of indices to skip
5309 621b7678 Iustin Pop
  @type target_node: string
5310 621b7678 Iustin Pop
  @param target_node: if passed, overrides the target node for creation
5311 e4376078 Iustin Pop
  @rtype: boolean
5312 e4376078 Iustin Pop
  @return: the success of the creation
5313 a8083063 Iustin Pop

5314 a8083063 Iustin Pop
  """
5315 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
5316 621b7678 Iustin Pop
  if target_node is None:
5317 621b7678 Iustin Pop
    pnode = instance.primary_node
5318 621b7678 Iustin Pop
    all_nodes = instance.all_nodes
5319 621b7678 Iustin Pop
  else:
5320 621b7678 Iustin Pop
    pnode = target_node
5321 621b7678 Iustin Pop
    all_nodes = [pnode]
5322 a0c3fea1 Michael Hanselmann
5323 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5324 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5325 428958aa Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
5326 0f1a06e3 Manuel Franceschini
5327 4c4e4e1e Iustin Pop
    result.Raise("Failed to create directory '%s' on"
5328 9b4127eb Guido Trotter
                 " node %s" % (file_storage_dir, pnode))
5329 0f1a06e3 Manuel Franceschini
5330 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
5331 24991749 Iustin Pop
  # LUSetInstanceParams
5332 bd315bfa Iustin Pop
  for idx, device in enumerate(instance.disks):
5333 bd315bfa Iustin Pop
    if to_skip and idx in to_skip:
5334 bd315bfa Iustin Pop
      continue
5335 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
5336 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
5337 a8083063 Iustin Pop
    #HARDCODE
5338 621b7678 Iustin Pop
    for node in all_nodes:
5339 428958aa Iustin Pop
      f_create = node == pnode
5340 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
5341 a8083063 Iustin Pop
5342 a8083063 Iustin Pop
5343 621b7678 Iustin Pop
def _RemoveDisks(lu, instance, target_node=None):
5344 a8083063 Iustin Pop
  """Remove all disks for an instance.
5345 a8083063 Iustin Pop

5346 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
5347 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
5348 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
5349 a8083063 Iustin Pop
  with `_CreateDisks()`).
5350 a8083063 Iustin Pop

5351 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5352 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5353 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5354 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
5355 621b7678 Iustin Pop
  @type target_node: string
5356 621b7678 Iustin Pop
  @param target_node: used to override the node on which to remove the disks
5357 e4376078 Iustin Pop
  @rtype: boolean
5358 e4376078 Iustin Pop
  @return: the success of the removal
5359 a8083063 Iustin Pop

5360 a8083063 Iustin Pop
  """
5361 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
5362 a8083063 Iustin Pop
5363 e1bc0878 Iustin Pop
  all_result = True
5364 a8083063 Iustin Pop
  for device in instance.disks:
5365 621b7678 Iustin Pop
    if target_node:
5366 621b7678 Iustin Pop
      edata = [(target_node, device)]
5367 621b7678 Iustin Pop
    else:
5368 621b7678 Iustin Pop
      edata = device.ComputeNodeTree(instance.primary_node)
5369 621b7678 Iustin Pop
    for node, disk in edata:
5370 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
5371 4c4e4e1e Iustin Pop
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
5372 e1bc0878 Iustin Pop
      if msg:
5373 e1bc0878 Iustin Pop
        lu.LogWarning("Could not remove block device %s on node %s,"
5374 e1bc0878 Iustin Pop
                      " continuing anyway: %s", device.iv_name, node, msg)
5375 e1bc0878 Iustin Pop
        all_result = False
5376 0f1a06e3 Manuel Franceschini
5377 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5378 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5379 dfc2a24c Guido Trotter
    if target_node:
5380 dfc2a24c Guido Trotter
      tgt = target_node
5381 621b7678 Iustin Pop
    else:
5382 dfc2a24c Guido Trotter
      tgt = instance.primary_node
5383 621b7678 Iustin Pop
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
5384 621b7678 Iustin Pop
    if result.fail_msg:
5385 b2b8bcce Iustin Pop
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
5386 621b7678 Iustin Pop
                    file_storage_dir, instance.primary_node, result.fail_msg)
5387 e1bc0878 Iustin Pop
      all_result = False
5388 0f1a06e3 Manuel Franceschini
5389 e1bc0878 Iustin Pop
  return all_result
5390 a8083063 Iustin Pop
5391 a8083063 Iustin Pop
5392 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
5393 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
5394 e2fe6369 Iustin Pop

5395 e2fe6369 Iustin Pop
  """
5396 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk template and disk sizes
5397 e2fe6369 Iustin Pop
  req_size_dict = {
5398 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
5399 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
5400 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
5401 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
5402 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
5403 e2fe6369 Iustin Pop
  }
5404 e2fe6369 Iustin Pop
5405 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
5406 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
5407 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
5408 e2fe6369 Iustin Pop
5409 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
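
# Worked example (hypothetical sizes, not taken from the original file):
#
#   disks = [{"size": 1024}, {"size": 512}]
#   _ComputeDiskSize(constants.DT_PLAIN, disks)     # -> 1536
#   _ComputeDiskSize(constants.DT_DRBD8, disks)     # -> 1792
#   _ComputeDiskSize(constants.DT_DISKLESS, disks)  # -> None
#
# drbd8 adds 128 MB of metadata per disk: (1024 + 128) + (512 + 128) = 1792.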
5410 e2fe6369 Iustin Pop
5411 e2fe6369 Iustin Pop
5412 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
5413 74409b12 Iustin Pop
  """Hypervisor parameter validation.
5414 74409b12 Iustin Pop

5415 74409b12 Iustin Pop
  This function abstract the hypervisor parameter validation to be
5416 74409b12 Iustin Pop
  used in both instance create and instance modify.
5417 74409b12 Iustin Pop

5418 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
5419 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
5420 74409b12 Iustin Pop
  @type nodenames: list
5421 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
5422 74409b12 Iustin Pop
  @type hvname: string
5423 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
5424 74409b12 Iustin Pop
  @type hvparams: dict
5425 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
5426 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
5427 74409b12 Iustin Pop

5428 74409b12 Iustin Pop
  """
5429 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
5430 74409b12 Iustin Pop
                                                  hvname,
5431 74409b12 Iustin Pop
                                                  hvparams)
5432 74409b12 Iustin Pop
  for node in nodenames:
5433 781de953 Iustin Pop
    info = hvinfo[node]
5434 68c6f21c Iustin Pop
    if info.offline:
5435 68c6f21c Iustin Pop
      continue
5436 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
5437 74409b12 Iustin Pop
5438 74409b12 Iustin Pop
5439 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
5440 a8083063 Iustin Pop
  """Create an instance.
5441 a8083063 Iustin Pop

5442 a8083063 Iustin Pop
  """
5443 a8083063 Iustin Pop
  HPATH = "instance-add"
5444 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5445 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
5446 08db7c5c Iustin Pop
              "mode", "start",
5447 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
5448 338e51e8 Iustin Pop
              "hvparams", "beparams"]
5449 7baf741d Guido Trotter
  REQ_BGL = False
5450 7baf741d Guido Trotter
5451 7baf741d Guido Trotter
  def _ExpandNode(self, node):
5452 7baf741d Guido Trotter
    """Expands and checks one node name.
5453 7baf741d Guido Trotter

5454 7baf741d Guido Trotter
    """
5455 7baf741d Guido Trotter
    node_full = self.cfg.ExpandNodeName(node)
5456 7baf741d Guido Trotter
    if node_full is None:
5457 7baf741d Guido Trotter
      raise errors.OpPrereqError("Unknown node %s" % node)
5458 7baf741d Guido Trotter
    return node_full
5459 7baf741d Guido Trotter
5460 7baf741d Guido Trotter
  def ExpandNames(self):
5461 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
5462 7baf741d Guido Trotter

5463 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
5464 7baf741d Guido Trotter

5465 7baf741d Guido Trotter
    """
5466 7baf741d Guido Trotter
    self.needed_locks = {}
5467 7baf741d Guido Trotter
5468 7baf741d Guido Trotter
    # set optional parameters to none if they don't exist
5469 6785674e Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
5470 7baf741d Guido Trotter
      if not hasattr(self.op, attr):
5471 7baf741d Guido Trotter
        setattr(self.op, attr, None)
5472 7baf741d Guido Trotter
5473 4b2f38dd Iustin Pop
    # cheap checks, mostly verifying that valid constants were given
5474 4b2f38dd Iustin Pop
5475 7baf741d Guido Trotter
    # verify creation mode
5476 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
5477 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
5478 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
5479 7baf741d Guido Trotter
                                 self.op.mode)
5480 4b2f38dd Iustin Pop
5481 7baf741d Guido Trotter
    # disk template and mirror node verification
5482 7baf741d Guido Trotter
    if self.op.disk_template not in constants.DISK_TEMPLATES:
5483 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid disk template name")
5484 7baf741d Guido Trotter
5485 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
5486 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
5487 4b2f38dd Iustin Pop
5488 8705eb96 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
5489 8705eb96 Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
5490 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
5491 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
5492 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
5493 4b2f38dd Iustin Pop
                                  ",".join(enabled_hvs)))
5494 4b2f38dd Iustin Pop
5495 6785674e Iustin Pop
    # check hypervisor parameter syntax (locally)
5496 a5728081 Guido Trotter
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
5497 abe609b2 Guido Trotter
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
5498 8705eb96 Iustin Pop
                                  self.op.hvparams)
5499 6785674e Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
5500 8705eb96 Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
5501 67fc3042 Iustin Pop
    self.hv_full = filled_hvp
5502 6785674e Iustin Pop
5503 338e51e8 Iustin Pop
    # fill and remember the beparams dict
5504 a5728081 Guido Trotter
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
5505 4ef7f423 Guido Trotter
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
5506 338e51e8 Iustin Pop
                                    self.op.beparams)
5507 338e51e8 Iustin Pop
5508 7baf741d Guido Trotter
    #### instance parameters check
5509 7baf741d Guido Trotter
5510 7baf741d Guido Trotter
    # instance name verification
5511 7baf741d Guido Trotter
    hostname1 = utils.HostInfo(self.op.instance_name)
5512 7baf741d Guido Trotter
    self.op.instance_name = instance_name = hostname1.name
5513 7baf741d Guido Trotter
5514 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
5515 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
5516 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
5517 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5518 7baf741d Guido Trotter
                                 instance_name)
5519 7baf741d Guido Trotter
5520 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
5521 7baf741d Guido Trotter
5522 08db7c5c Iustin Pop
    # NIC buildup
5523 08db7c5c Iustin Pop
    self.nics = []
5524 9dce4771 Guido Trotter
    for idx, nic in enumerate(self.op.nics):
5525 9dce4771 Guido Trotter
      nic_mode_req = nic.get("mode", None)
5526 9dce4771 Guido Trotter
      nic_mode = nic_mode_req
5527 9dce4771 Guido Trotter
      if nic_mode is None:
5528 9dce4771 Guido Trotter
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
5529 9dce4771 Guido Trotter
5530 9dce4771 Guido Trotter
      # in routed mode, for the first nic, the default ip is 'auto'
5531 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
5532 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_AUTO
5533 9dce4771 Guido Trotter
      else:
5534 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_NONE
5535 9dce4771 Guido Trotter
5536 08db7c5c Iustin Pop
      # ip validity checks
5537 9dce4771 Guido Trotter
      ip = nic.get("ip", default_ip_mode)
5538 9dce4771 Guido Trotter
      if ip is None or ip.lower() == constants.VALUE_NONE:
5539 08db7c5c Iustin Pop
        nic_ip = None
5540 08db7c5c Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
5541 08db7c5c Iustin Pop
        nic_ip = hostname1.ip
5542 08db7c5c Iustin Pop
      else:
5543 08db7c5c Iustin Pop
        if not utils.IsValidIP(ip):
5544 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
5545 08db7c5c Iustin Pop
                                     " like a valid IP" % ip)
5546 08db7c5c Iustin Pop
        nic_ip = ip
5547 08db7c5c Iustin Pop
5548 9dce4771 Guido Trotter
      # TODO: check the ip for uniqueness !!
5549 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
5550 9dce4771 Guido Trotter
        raise errors.OpPrereqError("Routed nic mode requires an ip address")
5551 9dce4771 Guido Trotter
5552 08db7c5c Iustin Pop
      # MAC address verification
5553 08db7c5c Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
5554 08db7c5c Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5555 08db7c5c Iustin Pop
        if not utils.IsValidMac(mac.lower()):
5556 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
5557 08db7c5c Iustin Pop
                                     mac)
5558 87e43988 Iustin Pop
        else:
5559 87e43988 Iustin Pop
          # or validate/reserve the current one
5560 87e43988 Iustin Pop
          if self.cfg.IsMacInUse(mac):
5561 87e43988 Iustin Pop
            raise errors.OpPrereqError("MAC address %s already in use"
5562 87e43988 Iustin Pop
                                       " in cluster" % mac)
5563 87e43988 Iustin Pop
5564 08db7c5c Iustin Pop
      # bridge verification
5565 9939547b Iustin Pop
      bridge = nic.get("bridge", None)
5566 9dce4771 Guido Trotter
      link = nic.get("link", None)
5567 9dce4771 Guido Trotter
      if bridge and link:
5568 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
5569 29921401 Iustin Pop
                                   " at the same time")
5570 9dce4771 Guido Trotter
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
5571 9dce4771 Guido Trotter
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic")
5572 9dce4771 Guido Trotter
      elif bridge:
5573 9dce4771 Guido Trotter
        link = bridge
5574 9dce4771 Guido Trotter
5575 9dce4771 Guido Trotter
      nicparams = {}
5576 9dce4771 Guido Trotter
      if nic_mode_req:
5577 9dce4771 Guido Trotter
        nicparams[constants.NIC_MODE] = nic_mode_req
5578 9dce4771 Guido Trotter
      if link:
5579 9dce4771 Guido Trotter
        nicparams[constants.NIC_LINK] = link
5580 9dce4771 Guido Trotter
5581 9dce4771 Guido Trotter
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
5582 9dce4771 Guido Trotter
                                      nicparams)
5583 9dce4771 Guido Trotter
      objects.NIC.CheckParameterSyntax(check_params)
5584 9dce4771 Guido Trotter
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
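    # Illustrative example (hypothetical values): an input nic dict such as
    #   {"mode": constants.NIC_MODE_ROUTED, "ip": "auto", "mac": "auto"}
    # on the first NIC ends up as objects.NIC(mac="auto", ip=hostname1.ip,
    # nicparams={constants.NIC_MODE: constants.NIC_MODE_ROUTED}); the "auto"
    # MAC is left to be generated later.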
5585 08db7c5c Iustin Pop
5586 08db7c5c Iustin Pop
    # disk checks/pre-build
5587 08db7c5c Iustin Pop
    self.disks = []
5588 08db7c5c Iustin Pop
    for disk in self.op.disks:
5589 08db7c5c Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
5590 08db7c5c Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
5591 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
5592 08db7c5c Iustin Pop
                                   mode)
5593 08db7c5c Iustin Pop
      size = disk.get("size", None)
5594 08db7c5c Iustin Pop
      if size is None:
5595 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Missing disk size")
5596 08db7c5c Iustin Pop
      try:
5597 08db7c5c Iustin Pop
        size = int(size)
5598 08db7c5c Iustin Pop
      except ValueError:
5599 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
5600 08db7c5c Iustin Pop
      self.disks.append({"size": size, "mode": mode})
5601 08db7c5c Iustin Pop
5602 7baf741d Guido Trotter
    # used in CheckPrereq for ip ping check
5603 7baf741d Guido Trotter
    self.check_ip = hostname1.ip
5604 7baf741d Guido Trotter
5605 7baf741d Guido Trotter
    # file storage checks
5606 7baf741d Guido Trotter
    if (self.op.file_driver and
5607 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
5608 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
5609 7baf741d Guido Trotter
                                 self.op.file_driver)
5610 7baf741d Guido Trotter
5611 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
5612 7baf741d Guido Trotter
      raise errors.OpPrereqError("File storage directory path not absolute")
5613 7baf741d Guido Trotter
5614 7baf741d Guido Trotter
    ### Node/iallocator related checks
5615 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
5616 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
5617 7baf741d Guido Trotter
                                 " node must be given")
5618 7baf741d Guido Trotter
5619 7baf741d Guido Trotter
    if self.op.iallocator:
5620 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5621 7baf741d Guido Trotter
    else:
5622 7baf741d Guido Trotter
      self.op.pnode = self._ExpandNode(self.op.pnode)
5623 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
5624 7baf741d Guido Trotter
      if self.op.snode is not None:
5625 7baf741d Guido Trotter
        self.op.snode = self._ExpandNode(self.op.snode)
5626 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
5627 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
5628 7baf741d Guido Trotter
5629 7baf741d Guido Trotter
    # in case of import, lock the source node too
5630 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
5631 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
5632 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
5633 7baf741d Guido Trotter
5634 b9322a9f Guido Trotter
      if src_path is None:
5635 b9322a9f Guido Trotter
        self.op.src_path = src_path = self.op.instance_name
5636 b9322a9f Guido Trotter
5637 b9322a9f Guido Trotter
      if src_node is None:
5638 b9322a9f Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5639 b9322a9f Guido Trotter
        self.op.src_node = None
5640 b9322a9f Guido Trotter
        if os.path.isabs(src_path):
5641 b9322a9f Guido Trotter
          raise errors.OpPrereqError("Importing an instance from an absolute"
5642 b9322a9f Guido Trotter
                                     " path requires a source node option.")
5643 b9322a9f Guido Trotter
      else:
5644 b9322a9f Guido Trotter
        self.op.src_node = src_node = self._ExpandNode(src_node)
5645 b9322a9f Guido Trotter
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
5646 b9322a9f Guido Trotter
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
5647 b9322a9f Guido Trotter
        if not os.path.isabs(src_path):
5648 b9322a9f Guido Trotter
          self.op.src_path = src_path = \
5649 b9322a9f Guido Trotter
            os.path.join(constants.EXPORT_DIR, src_path)
5650 7baf741d Guido Trotter
5651 f2c05717 Guido Trotter
      # On import force_variant must be True, because if we forced it at
5652 f2c05717 Guido Trotter
      # initial install, our only chance when importing it back is that it
5653 f2c05717 Guido Trotter
      # works again!
5654 f2c05717 Guido Trotter
      self.op.force_variant = True
5655 f2c05717 Guido Trotter
5656 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
5657 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
5658 7baf741d Guido Trotter
        raise errors.OpPrereqError("No guest OS specified")
5659 f2c05717 Guido Trotter
      self.op.force_variant = getattr(self.op, "force_variant", False)
5660 a8083063 Iustin Pop
5661 538475ca Iustin Pop
  def _RunAllocator(self):
5662 538475ca Iustin Pop
    """Run the allocator based on input opcode.
5663 538475ca Iustin Pop

5664 538475ca Iustin Pop
    """
5665 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
5666 923ddac0 Michael Hanselmann
    ial = IAllocator(self.cfg, self.rpc,
5667 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
5668 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
5669 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
5670 d1c2dd75 Iustin Pop
                     tags=[],
5671 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
5672 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
5673 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
5674 08db7c5c Iustin Pop
                     disks=self.disks,
5675 d1c2dd75 Iustin Pop
                     nics=nics,
5676 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
5677 29859cb7 Iustin Pop
                     )
5678 d1c2dd75 Iustin Pop
5679 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
5680 d1c2dd75 Iustin Pop
5681 d1c2dd75 Iustin Pop
    if not ial.success:
5682 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
5683 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
5684 d1c2dd75 Iustin Pop
                                                           ial.info))
5685 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
5686 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5687 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
5688 97abc79f Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
5689 1ce4bbe3 René Nussbaumer
                                  ial.required_nodes))
5690 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
5691 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
5692 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
5693 86d9d3bb Iustin Pop
                 ", ".join(ial.nodes))
5694 27579978 Iustin Pop
    if ial.required_nodes == 2:
5695 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
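    # For a net-mirrored (DRBD) instance the allocator answer consumed above
    # would look roughly like this (node names are made up):
    #
    #   ial.success        -> True
    #   ial.required_nodes -> 2
    #   ial.nodes          -> ["node1.example.com", "node2.example.com"]
    #
    # i.e. ial.nodes[0] becomes the primary and ial.nodes[1] the secondary.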
5696 538475ca Iustin Pop
5697 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5698 a8083063 Iustin Pop
    """Build hooks env.
5699 a8083063 Iustin Pop

5700 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
5701 a8083063 Iustin Pop

5702 a8083063 Iustin Pop
    """
5703 a8083063 Iustin Pop
    env = {
5704 2c2690c9 Iustin Pop
      "ADD_MODE": self.op.mode,
5705 a8083063 Iustin Pop
      }
5706 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
5707 2c2690c9 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
5708 2c2690c9 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
5709 2c2690c9 Iustin Pop
      env["SRC_IMAGES"] = self.src_images
5710 396e1b78 Michael Hanselmann
5711 2c2690c9 Iustin Pop
    env.update(_BuildInstanceHookEnv(
5712 2c2690c9 Iustin Pop
      name=self.op.instance_name,
5713 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
5714 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
5715 4978db17 Iustin Pop
      status=self.op.start,
5716 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
5717 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
5718 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
5719 f9b10246 Guido Trotter
      nics=_NICListToTuple(self, self.nics),
5720 2c2690c9 Iustin Pop
      disk_template=self.op.disk_template,
5721 2c2690c9 Iustin Pop
      disks=[(d["size"], d["mode"]) for d in self.disks],
5722 67fc3042 Iustin Pop
      bep=self.be_full,
5723 67fc3042 Iustin Pop
      hvp=self.hv_full,
5724 3df6e710 Iustin Pop
      hypervisor_name=self.op.hypervisor,
5725 396e1b78 Michael Hanselmann
    ))
5726 a8083063 Iustin Pop
5727 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
5728 a8083063 Iustin Pop
          self.secondaries)
5729 a8083063 Iustin Pop
    return env, nl, nl
5730 a8083063 Iustin Pop
5731 a8083063 Iustin Pop
5732 a8083063 Iustin Pop
  def CheckPrereq(self):
5733 a8083063 Iustin Pop
    """Check prerequisites.
5734 a8083063 Iustin Pop

5735 a8083063 Iustin Pop
    """
5736 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
5737 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
5738 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
5739 eedc99de Manuel Franceschini
                                 " instances")
5740 eedc99de Manuel Franceschini
5741 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
5742 7baf741d Guido Trotter
      src_node = self.op.src_node
5743 7baf741d Guido Trotter
      src_path = self.op.src_path
5744 a8083063 Iustin Pop
5745 c0cbdc67 Guido Trotter
      if src_node is None:
5746 1b7bfbb7 Iustin Pop
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
5747 1b7bfbb7 Iustin Pop
        exp_list = self.rpc.call_export_list(locked_nodes)
5748 c0cbdc67 Guido Trotter
        found = False
5749 c0cbdc67 Guido Trotter
        for node in exp_list:
5750 4c4e4e1e Iustin Pop
          if exp_list[node].fail_msg:
5751 1b7bfbb7 Iustin Pop
            continue
5752 1b7bfbb7 Iustin Pop
          if src_path in exp_list[node].payload:
5753 c0cbdc67 Guido Trotter
            found = True
5754 c0cbdc67 Guido Trotter
            self.op.src_node = src_node = node
5755 c0cbdc67 Guido Trotter
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
5756 c0cbdc67 Guido Trotter
                                                       src_path)
5757 c0cbdc67 Guido Trotter
            break
5758 c0cbdc67 Guido Trotter
        if not found:
5759 c0cbdc67 Guido Trotter
          raise errors.OpPrereqError("No export found for relative path %s" %
5760 c0cbdc67 Guido Trotter
                                      src_path)
5761 c0cbdc67 Guido Trotter
5762 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, src_node)
5763 781de953 Iustin Pop
      result = self.rpc.call_export_info(src_node, src_path)
5764 4c4e4e1e Iustin Pop
      result.Raise("No export or invalid export found in dir %s" % src_path)
5765 a8083063 Iustin Pop
5766 3eccac06 Iustin Pop
      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
5767 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
5768 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
5769 a8083063 Iustin Pop
5770 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
5771 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
5772 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
5773 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
5774 a8083063 Iustin Pop
5775 09acf207 Guido Trotter
      # Check that the new instance doesn't have fewer disks than the export
5776 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
5777 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
5778 09acf207 Guido Trotter
      if instance_disks < export_disks:
5779 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
5780 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
5781 726d7d68 Iustin Pop
                                   (instance_disks, export_disks))
5782 a8083063 Iustin Pop
5783 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
5784 09acf207 Guido Trotter
      disk_images = []
5785 09acf207 Guido Trotter
      for idx in range(export_disks):
5786 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
5787 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
5788 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
5789 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
5790 09acf207 Guido Trotter
          image = os.path.join(src_path, export_name)
5791 09acf207 Guido Trotter
          disk_images.append(image)
5792 09acf207 Guido Trotter
        else:
5793 09acf207 Guido Trotter
          disk_images.append(False)
5794 09acf207 Guido Trotter
5795 09acf207 Guido Trotter
      self.src_images = disk_images
5796 901a65c1 Iustin Pop
5797 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
5798 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
5799 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
5800 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
5801 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
5802 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
5803 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
5804 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
5805 bc89efc3 Guido Trotter
5806 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
5807 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
5808 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
5809 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
5810 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
5811 901a65c1 Iustin Pop
5812 901a65c1 Iustin Pop
    if self.op.ip_check:
5813 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
5814 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
5815 7b3a8fb5 Iustin Pop
                                   (self.check_ip, self.op.instance_name))
5816 901a65c1 Iustin Pop
5817 295728df Guido Trotter
    #### mac address generation
5818 295728df Guido Trotter
    # By generating the mac here, both the allocator and the hooks get
5819 295728df Guido Trotter
    # the real final mac address rather than the 'auto' or 'generate' value.
5820 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
5821 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
5822 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
5823 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
5824 295728df Guido Trotter
    # creation job will fail.
5825 295728df Guido Trotter
    for nic in self.nics:
5826 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5827 295728df Guido Trotter
        nic.mac = self.cfg.GenerateMAC()
5828 295728df Guido Trotter
5829 538475ca Iustin Pop
    #### allocator run
5830 538475ca Iustin Pop
5831 538475ca Iustin Pop
    if self.op.iallocator is not None:
5832 538475ca Iustin Pop
      self._RunAllocator()
5833 0f1a06e3 Manuel Franceschini
5834 901a65c1 Iustin Pop
    #### node related checks
5835 901a65c1 Iustin Pop
5836 901a65c1 Iustin Pop
    # check primary node
5837 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
5838 7baf741d Guido Trotter
    assert self.pnode is not None, \
5839 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
5840 7527a8a4 Iustin Pop
    if pnode.offline:
5841 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
5842 7527a8a4 Iustin Pop
                                 pnode.name)
5843 733a2b6a Iustin Pop
    if pnode.drained:
5844 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
5845 733a2b6a Iustin Pop
                                 pnode.name)
5846 7527a8a4 Iustin Pop
5847 901a65c1 Iustin Pop
    self.secondaries = []
5848 901a65c1 Iustin Pop
5849 901a65c1 Iustin Pop
    # mirror node verification
5850 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
5851 7baf741d Guido Trotter
      if self.op.snode is None:
5852 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
5853 3ecf6786 Iustin Pop
                                   " a mirror node")
5854 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
5855 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
5856 3ecf6786 Iustin Pop
                                   " the primary node.")
5857 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
5858 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
5859 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
5860 a8083063 Iustin Pop
5861 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
5862 6785674e Iustin Pop
5863 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
5864 08db7c5c Iustin Pop
                                self.disks)
5865 ed1ebc60 Guido Trotter
5866 8d75db10 Iustin Pop
    # Check lv size requirements
5867 8d75db10 Iustin Pop
    if req_size is not None:
5868 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
5869 72737a7f Iustin Pop
                                         self.op.hypervisor)
5870 8d75db10 Iustin Pop
      for node in nodenames:
5871 781de953 Iustin Pop
        info = nodeinfo[node]
5872 4c4e4e1e Iustin Pop
        info.Raise("Cannot get current information from node %s" % node)
5873 070e998b Iustin Pop
        info = info.payload
5874 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
5875 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
5876 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
5877 8d75db10 Iustin Pop
                                     " node %s" % node)
5878 070e998b Iustin Pop
        if req_size > vg_free:
5879 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
5880 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
5881 070e998b Iustin Pop
                                     (node, vg_free, req_size))
5882 ed1ebc60 Guido Trotter
5883 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
5884 6785674e Iustin Pop
5885 a8083063 Iustin Pop
    # os verification
5886 781de953 Iustin Pop
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
5887 4c4e4e1e Iustin Pop
    result.Raise("OS '%s' not in supported os list for primary node %s" %
5888 4c4e4e1e Iustin Pop
                 (self.op.os_type, pnode.name), prereq=True)
5889 f2c05717 Guido Trotter
    if not self.op.force_variant:
5890 f2c05717 Guido Trotter
      _CheckOSVariant(result.payload, self.op.os_type)
5891 a8083063 Iustin Pop
5892 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
5893 a8083063 Iustin Pop
5894 49ce1563 Iustin Pop
    # memory check on primary node
5895 49ce1563 Iustin Pop
    if self.op.start:
5896 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
5897 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
5898 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
5899 338e51e8 Iustin Pop
                           self.op.hypervisor)
5900 49ce1563 Iustin Pop
5901 08896026 Iustin Pop
    self.dry_run_result = list(nodenames)
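    # This is what a dry-run submission of the opcode returns to the caller
    # instead of the result of Exec; a sketch with made-up node names:
    #
    #   self.dry_run_result -> ["node1.example.com", "node2.example.com"]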
5902 08896026 Iustin Pop
5903 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5904 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
5905 a8083063 Iustin Pop

5906 a8083063 Iustin Pop
    """
5907 a8083063 Iustin Pop
    instance = self.op.instance_name
5908 a8083063 Iustin Pop
    pnode_name = self.pnode.name
5909 a8083063 Iustin Pop
5910 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
5911 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
5912 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
5913 2a6469d5 Alexander Schreiber
    else:
5914 2a6469d5 Alexander Schreiber
      network_port = None
5915 58acb49d Alexander Schreiber
5916 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
5917 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
5918 31a853d2 Iustin Pop
5919 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
5920 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
5921 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
5922 2c313123 Manuel Franceschini
    else:
5923 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
5924 2c313123 Manuel Franceschini
5925 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
5926 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
5927 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
5928 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
5929 0f1a06e3 Manuel Franceschini
5930 0f1a06e3 Manuel Franceschini
5931 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
5932 a8083063 Iustin Pop
                                  self.op.disk_template,
5933 a8083063 Iustin Pop
                                  instance, pnode_name,
5934 08db7c5c Iustin Pop
                                  self.secondaries,
5935 08db7c5c Iustin Pop
                                  self.disks,
5936 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
5937 e2a65344 Iustin Pop
                                  self.op.file_driver,
5938 e2a65344 Iustin Pop
                                  0)
5939 a8083063 Iustin Pop
5940 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
5941 a8083063 Iustin Pop
                            primary_node=pnode_name,
5942 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
5943 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
5944 4978db17 Iustin Pop
                            admin_up=False,
5945 58acb49d Alexander Schreiber
                            network_port=network_port,
5946 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
5947 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
5948 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
5949 a8083063 Iustin Pop
                            )
5950 a8083063 Iustin Pop
5951 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
5952 796cab27 Iustin Pop
    try:
5953 796cab27 Iustin Pop
      _CreateDisks(self, iobj)
5954 796cab27 Iustin Pop
    except errors.OpExecError:
5955 796cab27 Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
5956 796cab27 Iustin Pop
      try:
5957 796cab27 Iustin Pop
        _RemoveDisks(self, iobj)
5958 796cab27 Iustin Pop
      finally:
5959 796cab27 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance)
5960 796cab27 Iustin Pop
        raise
5961 a8083063 Iustin Pop
5962 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
5963 a8083063 Iustin Pop
5964 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
5965 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
5966 7baf741d Guido Trotter
    # added the instance to the config
5967 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
5968 e36e96b4 Guido Trotter
    # Unlock all the nodes
5969 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
5970 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
5971 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
5972 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
5973 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
5974 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
5975 9c8971d7 Guido Trotter
    else:
5976 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
5977 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
5978 a8083063 Iustin Pop
5979 a8083063 Iustin Pop
    if self.op.wait_for_sync:
5980 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
5981 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
5982 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
5983 a8083063 Iustin Pop
      time.sleep(15)
5984 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
5985 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
5986 a8083063 Iustin Pop
    else:
5987 a8083063 Iustin Pop
      disk_abort = False
5988 a8083063 Iustin Pop
5989 a8083063 Iustin Pop
    if disk_abort:
5990 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
5991 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
5992 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
5993 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
5994 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
5995 3ecf6786 Iustin Pop
                               " this instance")
5996 a8083063 Iustin Pop
5997 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
5998 a8083063 Iustin Pop
                (instance, pnode_name))
5999 a8083063 Iustin Pop
6000 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
6001 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
6002 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
6003 e557bae9 Guido Trotter
        result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
6004 4c4e4e1e Iustin Pop
        result.Raise("Could not add os for instance %s"
6005 4c4e4e1e Iustin Pop
                     " on node %s" % (instance, pnode_name))
6006 a8083063 Iustin Pop
6007 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
6008 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
6009 a8083063 Iustin Pop
        src_node = self.op.src_node
6010 09acf207 Guido Trotter
        src_images = self.src_images
6011 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
6012 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
6013 09acf207 Guido Trotter
                                                         src_node, src_images,
6014 6c0af70e Guido Trotter
                                                         cluster_name)
6015 4c4e4e1e Iustin Pop
        msg = import_result.fail_msg
6016 944bf548 Iustin Pop
        if msg:
6017 944bf548 Iustin Pop
          self.LogWarning("Error while importing the disk images for instance"
6018 944bf548 Iustin Pop
                          " %s on node %s: %s" % (instance, pnode_name, msg))
6019 a8083063 Iustin Pop
      else:
6020 a8083063 Iustin Pop
        # also checked in the prereq part
6021 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
6022 3ecf6786 Iustin Pop
                                     % self.op.mode)
6023 a8083063 Iustin Pop
6024 a8083063 Iustin Pop
    if self.op.start:
6025 4978db17 Iustin Pop
      iobj.admin_up = True
6026 4978db17 Iustin Pop
      self.cfg.Update(iobj)
6027 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
6028 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
6029 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
6030 4c4e4e1e Iustin Pop
      result.Raise("Could not start instance")
6031 a8083063 Iustin Pop
6032 08896026 Iustin Pop
    return list(iobj.all_nodes)
6033 08896026 Iustin Pop
6034 a8083063 Iustin Pop
6035 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
6036 a8083063 Iustin Pop
  """Connect to an instance's console.
6037 a8083063 Iustin Pop

6038 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
6039 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
6040 a8083063 Iustin Pop
  console.
6041 a8083063 Iustin Pop

6042 a8083063 Iustin Pop
  """
6043 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
6044 8659b73e Guido Trotter
  REQ_BGL = False
6045 8659b73e Guido Trotter
6046 8659b73e Guido Trotter
  def ExpandNames(self):
6047 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
6048 a8083063 Iustin Pop
6049 a8083063 Iustin Pop
  def CheckPrereq(self):
6050 a8083063 Iustin Pop
    """Check prerequisites.
6051 a8083063 Iustin Pop

6052 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
6053 a8083063 Iustin Pop

6054 a8083063 Iustin Pop
    """
6055 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6056 8659b73e Guido Trotter
    assert self.instance is not None, \
6057 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6058 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
6059 a8083063 Iustin Pop
6060 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6061 a8083063 Iustin Pop
    """Connect to the console of an instance
6062 a8083063 Iustin Pop

6063 a8083063 Iustin Pop
    """
6064 a8083063 Iustin Pop
    instance = self.instance
6065 a8083063 Iustin Pop
    node = instance.primary_node
6066 a8083063 Iustin Pop
6067 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
6068 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
6069 4c4e4e1e Iustin Pop
    node_insts.Raise("Can't get node information from %s" % node)
6070 a8083063 Iustin Pop
6071 aca13712 Iustin Pop
    if instance.name not in node_insts.payload:
6072 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
6073 a8083063 Iustin Pop
6074 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
6075 a8083063 Iustin Pop
6076 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
6077 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
6078 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
6079 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
6080 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
6081 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
6082 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
6083 b047857b Michael Hanselmann
6084 82122173 Iustin Pop
    # build ssh cmdline
6085 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
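    # The client is expected to run the returned command itself.  A rough,
    # illustrative sketch for a Xen instance (host name and the exact ssh
    # options are made up):
    #
    #   ["ssh", "-t", ..., "root@node1.example.com", "xm console instance1"]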
6086 a8083063 Iustin Pop
6087 a8083063 Iustin Pop
6088 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
6089 a8083063 Iustin Pop
  """Replace the disks of an instance.
6090 a8083063 Iustin Pop

6091 a8083063 Iustin Pop
  """
6092 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
6093 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6094 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
6095 efd990e4 Guido Trotter
  REQ_BGL = False
6096 efd990e4 Guido Trotter
6097 7e9366f7 Iustin Pop
  def CheckArguments(self):
6098 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
6099 efd990e4 Guido Trotter
      self.op.remote_node = None
6100 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
6101 7e9366f7 Iustin Pop
      self.op.iallocator = None
6102 7e9366f7 Iustin Pop
6103 c68174b6 Michael Hanselmann
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
6104 c68174b6 Michael Hanselmann
                                  self.op.iallocator)
6105 7e9366f7 Iustin Pop
6106 7e9366f7 Iustin Pop
  def ExpandNames(self):
6107 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
6108 7e9366f7 Iustin Pop
6109 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
6110 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6111 2bb5c911 Michael Hanselmann
6112 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
6113 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
6114 efd990e4 Guido Trotter
      if remote_node is None:
6115 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
6116 efd990e4 Guido Trotter
                                   self.op.remote_node)
6117 2bb5c911 Michael Hanselmann
6118 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
6119 2bb5c911 Michael Hanselmann
6120 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
6121 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
6122 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
6123 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
6124 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
6125 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6126 2bb5c911 Michael Hanselmann
6127 efd990e4 Guido Trotter
    else:
6128 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
6129 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6130 efd990e4 Guido Trotter
6131 c68174b6 Michael Hanselmann
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
6132 c68174b6 Michael Hanselmann
                                   self.op.iallocator, self.op.remote_node,
6133 c68174b6 Michael Hanselmann
                                   self.op.disks)
6134 c68174b6 Michael Hanselmann
6135 3a012b41 Michael Hanselmann
    self.tasklets = [self.replacer]
6136 2bb5c911 Michael Hanselmann
6137 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
6138 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
6139 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
6140 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
6141 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6142 efd990e4 Guido Trotter
      self._LockInstancesNodes()
6143 a8083063 Iustin Pop
6144 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6145 a8083063 Iustin Pop
    """Build hooks env.
6146 a8083063 Iustin Pop

6147 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
6148 a8083063 Iustin Pop

6149 a8083063 Iustin Pop
    """
6150 2bb5c911 Michael Hanselmann
    instance = self.replacer.instance
6151 a8083063 Iustin Pop
    env = {
6152 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
6153 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
6154 2bb5c911 Michael Hanselmann
      "OLD_SECONDARY": instance.secondary_nodes[0],
6155 a8083063 Iustin Pop
      }
6156 2bb5c911 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self, instance))
6157 0834c866 Iustin Pop
    nl = [
6158 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
6159 2bb5c911 Michael Hanselmann
      instance.primary_node,
6160 0834c866 Iustin Pop
      ]
6161 0834c866 Iustin Pop
    if self.op.remote_node is not None:
6162 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
6163 a8083063 Iustin Pop
    return env, nl, nl
6164 a8083063 Iustin Pop
6165 2bb5c911 Michael Hanselmann
6166 7ffc5a86 Michael Hanselmann
class LUEvacuateNode(LogicalUnit):
6167 7ffc5a86 Michael Hanselmann
  """Relocate the secondary instances from a node.
6168 7ffc5a86 Michael Hanselmann

6169 7ffc5a86 Michael Hanselmann
  """
6170 7ffc5a86 Michael Hanselmann
  HPATH = "node-evacuate"
6171 7ffc5a86 Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
6172 7ffc5a86 Michael Hanselmann
  _OP_REQP = ["node_name"]
6173 7ffc5a86 Michael Hanselmann
  REQ_BGL = False
6174 7ffc5a86 Michael Hanselmann
6175 7ffc5a86 Michael Hanselmann
  def CheckArguments(self):
6176 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "remote_node"):
6177 7ffc5a86 Michael Hanselmann
      self.op.remote_node = None
6178 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "iallocator"):
6179 7ffc5a86 Michael Hanselmann
      self.op.iallocator = None
6180 7ffc5a86 Michael Hanselmann
6181 7ffc5a86 Michael Hanselmann
    TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
6182 7ffc5a86 Michael Hanselmann
                                  self.op.remote_node,
6183 7ffc5a86 Michael Hanselmann
                                  self.op.iallocator)
6184 7ffc5a86 Michael Hanselmann
6185 7ffc5a86 Michael Hanselmann
  def ExpandNames(self):
6186 7ffc5a86 Michael Hanselmann
    self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name)
6187 7ffc5a86 Michael Hanselmann
    if self.op.node_name is None:
6188 7ffc5a86 Michael Hanselmann
      raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name)
6189 7ffc5a86 Michael Hanselmann
6190 7ffc5a86 Michael Hanselmann
    self.needed_locks = {}
6191 7ffc5a86 Michael Hanselmann
6192 7ffc5a86 Michael Hanselmann
    # Declare node locks
6193 7ffc5a86 Michael Hanselmann
    if self.op.iallocator is not None:
6194 7ffc5a86 Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6195 7ffc5a86 Michael Hanselmann
6196 7ffc5a86 Michael Hanselmann
    elif self.op.remote_node is not None:
6197 7ffc5a86 Michael Hanselmann
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
6198 7ffc5a86 Michael Hanselmann
      if remote_node is None:
6199 7ffc5a86 Michael Hanselmann
        raise errors.OpPrereqError("Node '%s' not known" %
6200 7ffc5a86 Michael Hanselmann
                                   self.op.remote_node)
6201 7ffc5a86 Michael Hanselmann
6202 7ffc5a86 Michael Hanselmann
      self.op.remote_node = remote_node
6203 7ffc5a86 Michael Hanselmann
6204 7ffc5a86 Michael Hanselmann
      # Warning: do not remove the locking of the new secondary here
6205 7ffc5a86 Michael Hanselmann
      # unless DRBD8.AddChildren is changed to work in parallel;
6206 7ffc5a86 Michael Hanselmann
      # currently it doesn't since parallel invocations of
6207 7ffc5a86 Michael Hanselmann
      # FindUnusedMinor will conflict
6208 7ffc5a86 Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
6209 7ffc5a86 Michael Hanselmann
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6210 7ffc5a86 Michael Hanselmann
6211 7ffc5a86 Michael Hanselmann
    else:
6212 7ffc5a86 Michael Hanselmann
      raise errors.OpPrereqError("Invalid parameters")
6213 7ffc5a86 Michael Hanselmann
6214 7ffc5a86 Michael Hanselmann
    # Create tasklets for replacing disks for all secondary instances on this
6215 7ffc5a86 Michael Hanselmann
    # node
6216 7ffc5a86 Michael Hanselmann
    names = []
6217 3a012b41 Michael Hanselmann
    tasklets = []
6218 7ffc5a86 Michael Hanselmann
6219 7ffc5a86 Michael Hanselmann
    for inst in _GetNodeSecondaryInstances(self.cfg, self.op.node_name):
6220 7ffc5a86 Michael Hanselmann
      logging.debug("Replacing disks for instance %s", inst.name)
6221 7ffc5a86 Michael Hanselmann
      names.append(inst.name)
6222 7ffc5a86 Michael Hanselmann
6223 7ffc5a86 Michael Hanselmann
      replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
6224 7ffc5a86 Michael Hanselmann
                                self.op.iallocator, self.op.remote_node, [])
6225 3a012b41 Michael Hanselmann
      tasklets.append(replacer)
6226 7ffc5a86 Michael Hanselmann
6227 3a012b41 Michael Hanselmann
    self.tasklets = tasklets
6228 7ffc5a86 Michael Hanselmann
    self.instance_names = names
6229 7ffc5a86 Michael Hanselmann
6230 7ffc5a86 Michael Hanselmann
    # Declare instance locks
6231 7ffc5a86 Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names
6232 7ffc5a86 Michael Hanselmann
6233 7ffc5a86 Michael Hanselmann
  def DeclareLocks(self, level):
6234 7ffc5a86 Michael Hanselmann
    # If we're not already locking all nodes in the set we have to declare the
6235 7ffc5a86 Michael Hanselmann
    # instance's primary/secondary nodes.
6236 7ffc5a86 Michael Hanselmann
    if (level == locking.LEVEL_NODE and
6237 7ffc5a86 Michael Hanselmann
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6238 7ffc5a86 Michael Hanselmann
      self._LockInstancesNodes()
6239 7ffc5a86 Michael Hanselmann
6240 7ffc5a86 Michael Hanselmann
  def BuildHooksEnv(self):
6241 7ffc5a86 Michael Hanselmann
    """Build hooks env.
6242 7ffc5a86 Michael Hanselmann

6243 7ffc5a86 Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
6244 7ffc5a86 Michael Hanselmann

6245 7ffc5a86 Michael Hanselmann
    """
6246 7ffc5a86 Michael Hanselmann
    env = {
6247 7ffc5a86 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
6248 7ffc5a86 Michael Hanselmann
      }
6249 7ffc5a86 Michael Hanselmann
6250 7ffc5a86 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
6251 7ffc5a86 Michael Hanselmann
6252 7ffc5a86 Michael Hanselmann
    if self.op.remote_node is not None:
6253 7ffc5a86 Michael Hanselmann
      env["NEW_SECONDARY"] = self.op.remote_node
6254 7ffc5a86 Michael Hanselmann
      nl.append(self.op.remote_node)
6255 7ffc5a86 Michael Hanselmann
6256 7ffc5a86 Michael Hanselmann
    return (env, nl, nl)
6257 7ffc5a86 Michael Hanselmann
6258 7ffc5a86 Michael Hanselmann
6259 c68174b6 Michael Hanselmann
class TLReplaceDisks(Tasklet):
6260 2bb5c911 Michael Hanselmann
  """Replaces disks for an instance.
6261 2bb5c911 Michael Hanselmann

6262 2bb5c911 Michael Hanselmann
  Note: Locking is not within the scope of this class.
6263 2bb5c911 Michael Hanselmann

6264 2bb5c911 Michael Hanselmann
  """
6265 2bb5c911 Michael Hanselmann
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
6266 2bb5c911 Michael Hanselmann
               disks):
6267 2bb5c911 Michael Hanselmann
    """Initializes this class.
6268 2bb5c911 Michael Hanselmann

6269 2bb5c911 Michael Hanselmann
    """
6270 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
6271 464243a7 Michael Hanselmann
6272 2bb5c911 Michael Hanselmann
    # Parameters
6273 2bb5c911 Michael Hanselmann
    self.instance_name = instance_name
6274 2bb5c911 Michael Hanselmann
    self.mode = mode
6275 2bb5c911 Michael Hanselmann
    self.iallocator_name = iallocator_name
6276 2bb5c911 Michael Hanselmann
    self.remote_node = remote_node
6277 2bb5c911 Michael Hanselmann
    self.disks = disks
6278 2bb5c911 Michael Hanselmann
6279 2bb5c911 Michael Hanselmann
    # Runtime data
6280 2bb5c911 Michael Hanselmann
    self.instance = None
6281 2bb5c911 Michael Hanselmann
    self.new_node = None
6282 2bb5c911 Michael Hanselmann
    self.target_node = None
6283 2bb5c911 Michael Hanselmann
    self.other_node = None
6284 2bb5c911 Michael Hanselmann
    self.remote_node_info = None
6285 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = None
6286 2bb5c911 Michael Hanselmann
6287 2bb5c911 Michael Hanselmann
  @staticmethod
6288 2bb5c911 Michael Hanselmann
  def CheckArguments(mode, remote_node, iallocator):
6289 c68174b6 Michael Hanselmann
    """Helper function for users of this class.
6290 c68174b6 Michael Hanselmann

6291 c68174b6 Michael Hanselmann
    """
6292 2bb5c911 Michael Hanselmann
    # check for valid parameter combination
6293 2bb5c911 Michael Hanselmann
    if mode == constants.REPLACE_DISK_CHG:
6294 02a00186 Michael Hanselmann
      if remote_node is None and iallocator is None:
6295 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("When changing the secondary either an"
6296 2bb5c911 Michael Hanselmann
                                   " iallocator script must be used or the"
6297 2bb5c911 Michael Hanselmann
                                   " new node given")
6298 02a00186 Michael Hanselmann
6299 02a00186 Michael Hanselmann
      if remote_node is not None and iallocator is not None:
6300 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("Give either the iallocator or the new"
6301 2bb5c911 Michael Hanselmann
                                   " secondary, not both")
6302 02a00186 Michael Hanselmann
6303 02a00186 Michael Hanselmann
    elif remote_node is not None or iallocator is not None:
6304 02a00186 Michael Hanselmann
      # Not replacing the secondary
6305 02a00186 Michael Hanselmann
      raise errors.OpPrereqError("The iallocator and new node options can"
6306 02a00186 Michael Hanselmann
                                 " only be used when changing the"
6307 02a00186 Michael Hanselmann
                                 " secondary node")
6308 2bb5c911 Michael Hanselmann
6309 2bb5c911 Michael Hanselmann
  @staticmethod
6310 2bb5c911 Michael Hanselmann
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
6311 2bb5c911 Michael Hanselmann
    """Compute a new secondary node using an IAllocator.
6312 2bb5c911 Michael Hanselmann

6313 2bb5c911 Michael Hanselmann
    """
6314 2bb5c911 Michael Hanselmann
    ial = IAllocator(lu.cfg, lu.rpc,
6315 2bb5c911 Michael Hanselmann
                     mode=constants.IALLOCATOR_MODE_RELOC,
6316 2bb5c911 Michael Hanselmann
                     name=instance_name,
6317 2bb5c911 Michael Hanselmann
                     relocate_from=relocate_from)
6318 2bb5c911 Michael Hanselmann
6319 2bb5c911 Michael Hanselmann
    ial.Run(iallocator_name)
6320 2bb5c911 Michael Hanselmann
6321 2bb5c911 Michael Hanselmann
    if not ial.success:
6322 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
6323 2bb5c911 Michael Hanselmann
                                 " %s" % (iallocator_name, ial.info))
6324 2bb5c911 Michael Hanselmann
6325 2bb5c911 Michael Hanselmann
    if len(ial.nodes) != ial.required_nodes:
6326 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6327 2bb5c911 Michael Hanselmann
                                 " of nodes (%s), required %s" %
6328 2bb5c911 Michael Hanselmann
                                 (len(ial.nodes), ial.required_nodes))
6329 2bb5c911 Michael Hanselmann
6330 2bb5c911 Michael Hanselmann
    remote_node_name = ial.nodes[0]
6331 2bb5c911 Michael Hanselmann
6332 2bb5c911 Michael Hanselmann
    lu.LogInfo("Selected new secondary for instance '%s': %s",
6333 2bb5c911 Michael Hanselmann
               instance_name, remote_node_name)
6334 2bb5c911 Michael Hanselmann
6335 2bb5c911 Michael Hanselmann
    return remote_node_name
6336 2bb5c911 Michael Hanselmann
6337 942be002 Michael Hanselmann
  def _FindFaultyDisks(self, node_name):
6338 2d9005d8 Michael Hanselmann
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
6339 2d9005d8 Michael Hanselmann
                                    node_name, True)
6340 942be002 Michael Hanselmann
6341 2bb5c911 Michael Hanselmann
  def CheckPrereq(self):
6342 2bb5c911 Michael Hanselmann
    """Check prerequisites.
6343 2bb5c911 Michael Hanselmann

6344 2bb5c911 Michael Hanselmann
    This checks that the instance is in the cluster.
6345 2bb5c911 Michael Hanselmann

6346 2bb5c911 Michael Hanselmann
    """
6347 2bb5c911 Michael Hanselmann
    self.instance = self.cfg.GetInstanceInfo(self.instance_name)
6348 2bb5c911 Michael Hanselmann
    assert self.instance is not None, \
6349 2bb5c911 Michael Hanselmann
      "Cannot retrieve locked instance %s" % self.instance_name
6350 2bb5c911 Michael Hanselmann
6351 2bb5c911 Michael Hanselmann
    if self.instance.disk_template != constants.DT_DRBD8:
6352 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
6353 7e9366f7 Iustin Pop
                                 " instances")
6354 a8083063 Iustin Pop
6355 2bb5c911 Michael Hanselmann
    if len(self.instance.secondary_nodes) != 1:
6356 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
6357 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
6358 2bb5c911 Michael Hanselmann
                                 len(self.instance.secondary_nodes))
6359 a8083063 Iustin Pop
6360 2bb5c911 Michael Hanselmann
    secondary_node = self.instance.secondary_nodes[0]
6361 a9e0c397 Iustin Pop
6362 2bb5c911 Michael Hanselmann
    if self.iallocator_name is None:
6363 2bb5c911 Michael Hanselmann
      remote_node = self.remote_node
6364 2bb5c911 Michael Hanselmann
    else:
6365 2bb5c911 Michael Hanselmann
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
6366 2bb5c911 Michael Hanselmann
                                       self.instance.name, secondary_node)
6367 b6e82a65 Iustin Pop
6368 a9e0c397 Iustin Pop
    if remote_node is not None:
6369 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
6370 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
6371 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
6372 a9e0c397 Iustin Pop
    else:
6373 a9e0c397 Iustin Pop
      self.remote_node_info = None
6374 2bb5c911 Michael Hanselmann
6375 2bb5c911 Michael Hanselmann
    if remote_node == self.instance.primary_node:
6376 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
6377 3ecf6786 Iustin Pop
                                 " the instance.")
6378 2bb5c911 Michael Hanselmann
6379 2bb5c911 Michael Hanselmann
    if remote_node == secondary_node:
6380 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
6381 7e9366f7 Iustin Pop
                                 " secondary node of the instance.")
6382 7e9366f7 Iustin Pop
6383 2945fd2d Michael Hanselmann
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
6384 2945fd2d Michael Hanselmann
                                    constants.REPLACE_DISK_CHG):
6385 2945fd2d Michael Hanselmann
      raise errors.OpPrereqError("Cannot specify disks to be replaced")
6386 942be002 Michael Hanselmann
6387 2945fd2d Michael Hanselmann
    if self.mode == constants.REPLACE_DISK_AUTO:
6388 942be002 Michael Hanselmann
      faulty_primary = self._FindFaultyDisks(self.instance.primary_node)
6389 942be002 Michael Hanselmann
      faulty_secondary = self._FindFaultyDisks(secondary_node)
6390 942be002 Michael Hanselmann
6391 942be002 Michael Hanselmann
      if faulty_primary and faulty_secondary:
6392 942be002 Michael Hanselmann
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
6393 942be002 Michael Hanselmann
                                   " one node and can not be repaired"
6394 942be002 Michael Hanselmann
                                   " automatically" % self.instance_name)
6395 942be002 Michael Hanselmann
6396 942be002 Michael Hanselmann
      if faulty_primary:
6397 942be002 Michael Hanselmann
        self.disks = faulty_primary
6398 942be002 Michael Hanselmann
        self.target_node = self.instance.primary_node
6399 942be002 Michael Hanselmann
        self.other_node = secondary_node
6400 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6401 942be002 Michael Hanselmann
      elif faulty_secondary:
6402 942be002 Michael Hanselmann
        self.disks = faulty_secondary
6403 942be002 Michael Hanselmann
        self.target_node = secondary_node
6404 942be002 Michael Hanselmann
        self.other_node = self.instance.primary_node
6405 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6406 942be002 Michael Hanselmann
      else:
6407 942be002 Michael Hanselmann
        self.disks = []
6408 942be002 Michael Hanselmann
        check_nodes = []
6409 942be002 Michael Hanselmann
6410 942be002 Michael Hanselmann
    else:
6411 942be002 Michael Hanselmann
      # Non-automatic modes
6412 942be002 Michael Hanselmann
      if self.mode == constants.REPLACE_DISK_PRI:
6413 942be002 Michael Hanselmann
        self.target_node = self.instance.primary_node
6414 942be002 Michael Hanselmann
        self.other_node = secondary_node
6415 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6416 7e9366f7 Iustin Pop
6417 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_SEC:
6418 942be002 Michael Hanselmann
        self.target_node = secondary_node
6419 942be002 Michael Hanselmann
        self.other_node = self.instance.primary_node
6420 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6421 a9e0c397 Iustin Pop
6422 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_CHG:
6423 942be002 Michael Hanselmann
        self.new_node = remote_node
6424 942be002 Michael Hanselmann
        self.other_node = self.instance.primary_node
6425 942be002 Michael Hanselmann
        self.target_node = secondary_node
6426 942be002 Michael Hanselmann
        check_nodes = [self.new_node, self.other_node]
6427 54155f52 Iustin Pop
6428 942be002 Michael Hanselmann
        _CheckNodeNotDrained(self.lu, remote_node)
6429 a8083063 Iustin Pop
6430 942be002 Michael Hanselmann
      else:
6431 942be002 Michael Hanselmann
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
6432 942be002 Michael Hanselmann
                                     self.mode)
6433 942be002 Michael Hanselmann
6434 942be002 Michael Hanselmann
      # If not specified all disks should be replaced
6435 942be002 Michael Hanselmann
      if not self.disks:
6436 942be002 Michael Hanselmann
        self.disks = range(len(self.instance.disks))
6437 a9e0c397 Iustin Pop
6438 2bb5c911 Michael Hanselmann
    for node in check_nodes:
6439 2bb5c911 Michael Hanselmann
      _CheckNodeOnline(self.lu, node)
6440 e4376078 Iustin Pop
6441 2bb5c911 Michael Hanselmann
    # Check whether disks are valid
6442 2bb5c911 Michael Hanselmann
    for disk_idx in self.disks:
6443 2bb5c911 Michael Hanselmann
      self.instance.FindDisk(disk_idx)
6444 e4376078 Iustin Pop
6445 2bb5c911 Michael Hanselmann
    # Get secondary node IP addresses
6446 2bb5c911 Michael Hanselmann
    node_2nd_ip = {}
6447 e4376078 Iustin Pop
6448 2bb5c911 Michael Hanselmann
    for node_name in [self.target_node, self.other_node, self.new_node]:
6449 2bb5c911 Michael Hanselmann
      if node_name is not None:
6450 2bb5c911 Michael Hanselmann
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
6451 e4376078 Iustin Pop
6452 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = node_2nd_ip
6453 a9e0c397 Iustin Pop
6454 c68174b6 Michael Hanselmann
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    if not self.disks:
      feedback_fn("No disks need replacement")
      return

    feedback_fn("Replacing disk(s) %s for %s" %
                (", ".join([str(i) for i in self.disks]), self.instance.name))

    activate_disks = (not self.instance.admin_up)

    # Activate the instance disks if we're replacing them on a down instance
    if activate_disks:
      _StartInstanceDisks(self.lu, self.instance, True)

    try:
      # Should we replace the secondary node?
      if self.new_node is not None:
        return self._ExecDrbd8Secondary()
      else:
        return self._ExecDrbd8DiskOnly()

    finally:
      # Deactivate the instance disks if we're replacing them on a down instance
      if activate_disks:
        _SafeShutdownInstanceDisks(self.lu, self.instance)

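  # Helper shared by both replacement paths: make sure the cluster's volume
  # group is visible on every node that takes part in the replacement.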
  def _CheckVolumeGroup(self, nodes):
    self.lu.LogInfo("Checking volume groups")

    vgname = self.cfg.GetVGName()

    # Make sure volume group exists on all involved nodes
    results = self.rpc.call_vg_list(nodes)
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")

    for node in nodes:
      res = results[node]
      res.Raise("Error checking node %s" % node)
      if vgname not in res.payload:
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
                                 (vgname, node))

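  # Verify that each disk selected for replacement can be found (via
  # blockdev_find) on the given nodes before any storage is touched.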
  def _CheckDisksExistence(self, nodes):
    # Check disk existence
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
        self.cfg.SetDiskID(dev, node)

        result = self.rpc.call_blockdev_find(node, dev)

        msg = result.fail_msg
        if msg or not result.payload:
          if not msg:
            msg = "disk not found"
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

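  # Abort the operation if any selected disk is reported as degraded on the
  # given node (see _CheckDiskConsistency for the on_primary/ldisk semantics).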
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
                      (idx, node_name))

      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
                                   ldisk=ldisk):
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
                                 " replace disks for instance %s" %
                                 (node_name, self.instance.name))

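  # Create fresh data/meta LVs for every disk being replaced on the given
  # node and return an iv_name -> (disk, old LVs, new LVs) map used by the
  # later rename/attach and cleanup steps.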
  def _CreateNewStorage(self, node_name):
    vgname = self.cfg.GetVGName()
    iv_names = {}

    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))

      self.cfg.SetDiskID(dev, node_name)

      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
      names = _GenerateUniqueNames(self.lu, lv_names)

      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))

      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)

      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    return iv_names

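  # Post-sync verification: every DRBD device must be findable on the given
  # node and must no longer report itself as degraded.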
  def _CheckDevices(self, node_name, iv_names):
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      self.cfg.SetDiskID(dev, node_name)

      result = self.rpc.call_blockdev_find(node_name, dev)

      msg = result.fail_msg
      if msg or not result.payload:
        if not msg:
          msg = "disk not found"
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))

      if result.payload.is_degraded:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

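  # Final cleanup: remove the LVs that were detached from the DRBD devices;
  # failures only produce warnings since the replacement itself is done.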
  def _RemoveOldStorage(self, node_name, iv_names):
    for name, (dev, old_lvs, _) in iv_names.iteritems():
      self.lu.LogInfo("Remove logical volumes for %s" % name)

      for lv in old_lvs:
        self.cfg.SetDiskID(lv, node_name)

        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
        if msg:
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
                             hint="remove unused LVs manually")

  def _ExecDrbd8DiskOnly(self):
    """Replace a disk on the primary or secondary for DRBD 8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.other_node, self.target_node])
    self._CheckVolumeGroup([self.target_node, self.other_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.other_node,
                                self.other_node == self.instance.primary_node,
                                False)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    iv_names = self._CreateNewStorage(self.target_node)

    # Step: for each lv, detach+rename*2+attach
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)

      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
                                                     old_lvs)
      result.Raise("Can't detach drbd from local storage on node"
                   " %s for device %s" % (self.target_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)

      # Build the rename list based on what LVs exist on the node
      rename_old_to_new = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
        if not result.fail_msg and result.payload:
          # device exists
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))

      self.lu.LogInfo("Renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_old_to_new)
      result.Raise("Can't rename old LVs on node %s" % self.target_node)

      # Now we rename the new LVs to the old LVs
      self.lu.LogInfo("Renaming the new LVs on the target node")
      rename_new_to_old = [(new, old.physical_id)
                           for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_new_to_old)
      result.Raise("Can't rename new LVs on node %s" % self.target_node)

      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        self.cfg.SetDiskID(new, self.target_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        self.cfg.SetDiskID(disk, self.target_node)

      # Now that the new lvs have the old name, we can add them to the device
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
                                                  new_lvs)
      msg = result.fail_msg
      if msg:
        for new_lv in new_lvs:
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
                                               new_lv).fail_msg
          if msg2:
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                               hint=("cleanup manually the unused logical"
                                     " volumes"))
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)

      dev.children = new_lvs

      self.cfg.Update(self.instance)

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(5, steps_total, "Sync devices")
    _WaitForSync(self.lu, self.instance, unlock=True)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    self.lu.LogStep(6, steps_total, "Removing old storage")
    self._RemoveOldStorage(self.target_node, iv_names)

  def _ExecDrbd8Secondary(self):
    """Replace the secondary node for DRBD 8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.instance.primary_node])
    self._CheckVolumeGroup([self.instance.primary_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.instance.primary_node, True, True)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                      (self.new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    # Step 4: drbd minors and drbd setup changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
                                        self.instance.name)
    logging.debug("Allocated minors %r" % (minors,))

    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
                      (self.new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the latter activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size)
      try:
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
                              _GetInstanceInfoText(self.instance), False)
      except errors.GenericError:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise

    # We have new devices, shutdown the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
      self.cfg.SetDiskID(dev, self.target_node)
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please cleanup this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
                                               self.node_secondary_ip,
                                               self.instance.disks)\
                                              [self.instance.primary_node]

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      self.cfg.SetDiskID(dev, self.instance.primary_node)

    self.cfg.Update(self.instance)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
                                           self.instance.disks,
                                           self.instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           to_node, msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(5, steps_total, "Sync devices")
    _WaitForSync(self.lu, self.instance, unlock=True)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    self.lu.LogStep(6, steps_total, "Removing old storage")
    self._RemoveOldStorage(self.target_node, iv_names)


class LURepairNodeStorage(NoHooksLU):
  """Repairs the volume group on a node.

  """
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)

    self.op.node_name = node_name

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def _CheckFaultyDisks(self, instance, node_name):
    if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
                                node_name, True):
      raise errors.OpPrereqError("Instance '%s' has faulty disks on"
                                 " node '%s'" % (instance.name, node_name))

  def CheckPrereq(self):
    """Check prerequisites.

    """
    storage_type = self.op.storage_type

    if (constants.SO_FIX_CONSISTENCY not in
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " repaired" % storage_type)

    # Check whether any instance on this node has faulty disks
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
      check_nodes = set(inst.all_nodes)
      check_nodes.discard(self.op.node_name)
      for inst_node_name in check_nodes:
        self._CheckFaultyDisks(inst, inst_node_name)

  def Exec(self, feedback_fn):
    feedback_fn("Repairing storage unit '%s' on %s ..." %
                (self.op.name, self.op.node_name))

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_execute(self.op.node_name,
                                           self.op.storage_type, st_args,
                                           self.op.name,
                                           constants.SO_FIX_CONSISTENCY)
    result.Raise("Failed to repair storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)

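    # Verify that the requested growth fits in the volume group's free space
    # on every node hosting this disk.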
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo[node]
      info.Raise("Cannot get current information from node %s" % node)
      vg_free = info.payload.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      result.Raise("Grow request failed to node %s" % node)
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")


class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeBlockdevStatus(self, node, instance_name, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance_name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance.name, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": _NICListToTuple(self, instance.nics),
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

      result[instance.name] = idict

    return result


class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

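  # CheckArguments normalizes the optional nics/disks/beparams/hvparams
  # fields and validates them syntactically before any locks are taken; at
  # most one disk and one NIC add/remove operation is accepted per request.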
  def CheckArguments(self):
    if not hasattr(self.op, 'nics'):
      self.op.nics = []
    if not hasattr(self.op, 'disks'):
      self.op.disks = []
    if not hasattr(self.op, 'beparams'):
      self.op.beparams = {}
    if not hasattr(self.op, 'hvparams'):
      self.op.hvparams = {}
    self.op.force = getattr(self.op, "force", False)
    if not (self.op.nics or self.op.disks or
            self.op.hvparams or self.op.beparams):
      raise errors.OpPrereqError("No changes submitted")

    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index")
        if not isinstance(disk_dict, dict):
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
          raise errors.OpPrereqError(msg)

      if disk_op == constants.DDM_ADD:
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
        size = disk_dict.get('size', None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing")
        try:
          size = int(size)
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err))
        disk_dict['size'] = size
      else:
        # modification of disk
        if 'size' in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk")

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time")

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index")
        if not isinstance(nic_dict, dict):
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
          raise errors.OpPrereqError(msg)

      # nic_dict should be a dict
      nic_ip = nic_dict.get('ip', None)
      if nic_ip is not None:
        if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict['ip'] = None
        else:
          if not utils.IsValidIP(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)

      nic_bridge = nic_dict.get('bridge', None)
      nic_link = nic_dict.get('link', None)
      if nic_bridge and nic_link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time")
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
        nic_dict['bridge'] = None
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
        nic_dict['link'] = None

      if nic_op == constants.DDM_ADD:
        nic_mac = nic_dict.get('mac', None)
        if nic_mac is None:
          nic_dict['mac'] = constants.VALUE_AUTO

      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          if not utils.IsValidMac(nic_mac):
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing nic")

    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time")

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      args['nics'] = []
      nic_override = dict(self.op.nics)
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
      for idx, nic in enumerate(self.instance.nics):
        if idx in nic_override:
          this_nic_override = nic_override[idx]
        else:
          this_nic_override = {}
        if 'ip' in this_nic_override:
          ip = this_nic_override['ip']
        else:
          ip = nic.ip
        if 'mac' in this_nic_override:
          mac = this_nic_override['mac']
        else:
          mac = nic.mac
        if idx in self.nic_pnew:
          nicparams = self.nic_pnew[idx]
        else:
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      if constants.DDM_ADD in nic_override:
        ip = nic_override[constants.DDM_ADD].get('ip', None)
        mac = nic_override[constants.DDM_ADD]['mac']
        nicparams = self.nic_pnew[constants.DDM_ADD]
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      elif constants.DDM_REMOVE in nic_override:
        del args['nics'][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def _GetUpdatedParams(self, old_params, update_dict,
                        default_values, parameter_types):
    """Return the new params dict for the given params.

    @type old_params: dict
    @param old_params: old parameters
    @type update_dict: dict
    @param update_dict: dict containing new parameter values,
                        or constants.VALUE_DEFAULT to reset the
                        parameter to its default value
    @type default_values: dict
    @param default_values: default values for the filled parameters
    @type parameter_types: dict
    @param parameter_types: dict mapping target dict keys to types
                            in constants.ENFORCEABLE_TYPES
    @rtype: (dict, dict)
    @return: (new_parameters, filled_parameters)

    """
    params_copy = copy.deepcopy(old_params)
    for key, val in update_dict.iteritems():
      if val == constants.VALUE_DEFAULT:
        try:
          del params_copy[key]
        except KeyError:
          pass
      else:
        params_copy[key] = val
    utils.ForceDictType(params_copy, parameter_types)
    params_filled = objects.FillDict(default_values, params_copy)
    return (params_copy, params_filled)

  def CheckPrereq(self):
7373 a8083063 Iustin Pop
    """Check prerequisites.
7374 a8083063 Iustin Pop

7375 a8083063 Iustin Pop
    This checks the requested parameter changes (hypervisor, backend, NIC
    and disk modifications) against the cluster and node state.
7376 a8083063 Iustin Pop

7377 a8083063 Iustin Pop
    """
7378 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
7379 a8083063 Iustin Pop
7380 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
7381 31a853d2 Iustin Pop
7382 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7383 2ee88aeb Guido Trotter
    cluster = self.cluster = self.cfg.GetClusterInfo()
7384 1a5c7281 Guido Trotter
    assert self.instance is not None, \
7385 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
7386 6b12959c Iustin Pop
    pnode = instance.primary_node
7387 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
7388 74409b12 Iustin Pop
7389 338e51e8 Iustin Pop
    # hvparams processing
7390 74409b12 Iustin Pop
    if self.op.hvparams:
7391 0329617a Guido Trotter
      i_hvdict, hv_new = self._GetUpdatedParams(
7392 0329617a Guido Trotter
                             instance.hvparams, self.op.hvparams,
7393 0329617a Guido Trotter
                             cluster.hvparams[instance.hypervisor],
7394 0329617a Guido Trotter
                             constants.HVS_PARAMETER_TYPES)
7395 74409b12 Iustin Pop
      # local check
7396 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
7397 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
7398 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
7399 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
7400 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
7401 338e51e8 Iustin Pop
    else:
7402 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
7403 338e51e8 Iustin Pop
7404 338e51e8 Iustin Pop
    # beparams processing
7405 338e51e8 Iustin Pop
    if self.op.beparams:
7406 0329617a Guido Trotter
      i_bedict, be_new = self._GetUpdatedParams(
7407 0329617a Guido Trotter
                             instance.beparams, self.op.beparams,
7408 0329617a Guido Trotter
                             cluster.beparams[constants.PP_DEFAULT],
7409 0329617a Guido Trotter
                             constants.BES_PARAMETER_TYPES)
7410 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
7411 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
7412 338e51e8 Iustin Pop
    else:
7413 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
7414 74409b12 Iustin Pop
7415 cfefe007 Guido Trotter
    self.warn = []
7416 647a5d80 Iustin Pop
7417 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
7418 647a5d80 Iustin Pop
      mem_check_list = [pnode]
7419 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
7420 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was already enabled
7421 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
7422 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
7423 72737a7f Iustin Pop
                                                  instance.hypervisor)
7424 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
7425 72737a7f Iustin Pop
                                         instance.hypervisor)
7426 070e998b Iustin Pop
      pninfo = nodeinfo[pnode]
7427 4c4e4e1e Iustin Pop
      msg = pninfo.fail_msg
7428 070e998b Iustin Pop
      if msg:
7429 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
7430 070e998b Iustin Pop
        self.warn.append("Can't get info from primary node %s: %s" %
7431 070e998b Iustin Pop
                         (pnode, msg))
7432 070e998b Iustin Pop
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
7433 070e998b Iustin Pop
        self.warn.append("Node data from primary node %s doesn't contain"
7434 070e998b Iustin Pop
                         " free memory information" % pnode)
7435 4c4e4e1e Iustin Pop
      elif instance_info.fail_msg:
7436 7ad1af4a Iustin Pop
        self.warn.append("Can't get instance runtime information: %s" %
7437 4c4e4e1e Iustin Pop
                        instance_info.fail_msg)
7438 cfefe007 Guido Trotter
      else:
7439 7ad1af4a Iustin Pop
        if instance_info.payload:
7440 7ad1af4a Iustin Pop
          current_mem = int(instance_info.payload['memory'])
7441 cfefe007 Guido Trotter
        else:
7442 cfefe007 Guido Trotter
          # Assume instance not running
7443 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
7444 cfefe007 Guido Trotter
          # and we have no other way to check)
7445 cfefe007 Guido Trotter
          current_mem = 0
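        # e.g. raising memory from 512 MB to 2048 MB while the primary
        # node reports 1024 MB free gives miss_mem = 2048 - 512 - 1024 =
        # 512, which is > 0, so the change is refused (the whole check is
        # skipped when self.op.force is set)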
7446 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
7447 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
7448 cfefe007 Guido Trotter
        if miss_mem > 0:
7449 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
7450 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
7451 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
7452 cfefe007 Guido Trotter
7453 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
7454 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
7455 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
7456 ea33068f Iustin Pop
            continue
7457 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
7458 070e998b Iustin Pop
          if msg:
7459 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
7460 070e998b Iustin Pop
                             (node, msg))
7461 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
7462 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
7463 070e998b Iustin Pop
                             " memory information" % node)
7464 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
7465 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
7466 647a5d80 Iustin Pop
                             " secondary node %s" % node)
7467 5bc84f33 Alexander Schreiber
7468 24991749 Iustin Pop
    # NIC processing
7469 cd098c41 Guido Trotter
    self.nic_pnew = {}
7470 cd098c41 Guido Trotter
    self.nic_pinst = {}
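    # self.op.nics is a list of (op, settings) pairs, where op is an
    # existing NIC index, constants.DDM_ADD or constants.DDM_REMOVE; e.g.
    # [(0, {'ip': '192.0.2.10'}), (constants.DDM_ADD, {'mac':
    # constants.VALUE_AUTO})] would modify NIC 0 and append a new NIC
    # (illustrative values)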
7471 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
7472 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
7473 24991749 Iustin Pop
        if not instance.nics:
7474 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
7475 24991749 Iustin Pop
        continue
7476 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
7477 24991749 Iustin Pop
        # an existing nic
7478 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
7479 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
7480 24991749 Iustin Pop
                                     " are 0 to %d" %
7481 24991749 Iustin Pop
                                     (nic_op, len(instance.nics) - 1))
7482 cd098c41 Guido Trotter
        old_nic_params = instance.nics[nic_op].nicparams
7483 cd098c41 Guido Trotter
        old_nic_ip = instance.nics[nic_op].ip
7484 cd098c41 Guido Trotter
      else:
7485 cd098c41 Guido Trotter
        old_nic_params = {}
7486 cd098c41 Guido Trotter
        old_nic_ip = None
7487 cd098c41 Guido Trotter
7488 cd098c41 Guido Trotter
      update_params_dict = dict([(key, nic_dict[key])
7489 cd098c41 Guido Trotter
                                 for key in constants.NICS_PARAMETERS
7490 cd098c41 Guido Trotter
                                 if key in nic_dict])
7491 cd098c41 Guido Trotter
7492 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
7493 cd098c41 Guido Trotter
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
7494 cd098c41 Guido Trotter
7495 cd098c41 Guido Trotter
      new_nic_params, new_filled_nic_params = \
7496 cd098c41 Guido Trotter
          self._GetUpdatedParams(old_nic_params, update_params_dict,
7497 cd098c41 Guido Trotter
                                 cluster.nicparams[constants.PP_DEFAULT],
7498 cd098c41 Guido Trotter
                                 constants.NICS_PARAMETER_TYPES)
7499 cd098c41 Guido Trotter
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
7500 cd098c41 Guido Trotter
      self.nic_pinst[nic_op] = new_nic_params
7501 cd098c41 Guido Trotter
      self.nic_pnew[nic_op] = new_filled_nic_params
7502 cd098c41 Guido Trotter
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
7503 cd098c41 Guido Trotter
7504 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
7505 cd098c41 Guido Trotter
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
7506 4c4e4e1e Iustin Pop
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
7507 35c0c8da Iustin Pop
        if msg:
7508 35c0c8da Iustin Pop
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
7509 24991749 Iustin Pop
          if self.force:
7510 24991749 Iustin Pop
            self.warn.append(msg)
7511 24991749 Iustin Pop
          else:
7512 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
7513 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_ROUTED:
7514 cd098c41 Guido Trotter
        if 'ip' in nic_dict:
7515 cd098c41 Guido Trotter
          nic_ip = nic_dict['ip']
7516 cd098c41 Guido Trotter
        else:
7517 cd098c41 Guido Trotter
          nic_ip = old_nic_ip
7518 cd098c41 Guido Trotter
        if nic_ip is None:
7519 cd098c41 Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic ip to None'
7520 cd098c41 Guido Trotter
                                     ' on a routed nic')
7521 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
7522 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
7523 5c44da6a Guido Trotter
        if nic_mac is None:
7524 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic mac to None')
7525 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7526 5c44da6a Guido Trotter
          # otherwise generate the mac
7527 5c44da6a Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC()
7528 5c44da6a Guido Trotter
        else:
7529 5c44da6a Guido Trotter
          # or validate/reserve the current one
7530 5c44da6a Guido Trotter
          if self.cfg.IsMacInUse(nic_mac):
7531 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
7532 5c44da6a Guido Trotter
                                       " in cluster" % nic_mac)
7533 24991749 Iustin Pop
7534 24991749 Iustin Pop
    # DISK processing
7535 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
7536 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
7537 24991749 Iustin Pop
                                 " diskless instances")
7538 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
7539 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
7540 24991749 Iustin Pop
        if len(instance.disks) == 1:
7541 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
7542 24991749 Iustin Pop
                                     " an instance")
7543 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
7544 24991749 Iustin Pop
        ins_l = ins_l[pnode]
7545 4c4e4e1e Iustin Pop
        msg = ins_l.fail_msg
7546 aca13712 Iustin Pop
        if msg:
7547 aca13712 Iustin Pop
          raise errors.OpPrereqError("Can't contact node %s: %s" %
7548 aca13712 Iustin Pop
                                     (pnode, msg))
7549 aca13712 Iustin Pop
        if instance.name in ins_l.payload:
7550 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
7551 24991749 Iustin Pop
                                     " disks.")
7552 24991749 Iustin Pop
7553 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
7554 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
7555 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
7556 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
7557 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
7558 24991749 Iustin Pop
        # an existing disk
7559 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
7560 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
7561 24991749 Iustin Pop
                                     " are 0 to %d" %
7562 24991749 Iustin Pop
                                     (disk_op, len(instance.disks) - 1))
7563 24991749 Iustin Pop
7564 a8083063 Iustin Pop
    return
7565 a8083063 Iustin Pop
7566 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7567 a8083063 Iustin Pop
    """Modifies an instance.
7568 a8083063 Iustin Pop

7569 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
7570 24991749 Iustin Pop

7571 a8083063 Iustin Pop
    """
7572 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
7573 cfefe007 Guido Trotter
    # feedback_fn there.
7574 cfefe007 Guido Trotter
    for warn in self.warn:
7575 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
7576 cfefe007 Guido Trotter
7577 a8083063 Iustin Pop
    result = []
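    # every change that is actually applied is recorded as a
    # (parameter, new value) pair, e.g. ("disk/0", "remove") or
    # ("be/memory", 512), and the full list is returned to the caller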
7578 a8083063 Iustin Pop
    instance = self.instance
7579 cd098c41 Guido Trotter
    cluster = self.cluster
7580 24991749 Iustin Pop
    # disk changes
7581 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
7582 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
7583 24991749 Iustin Pop
        # remove the last disk
7584 24991749 Iustin Pop
        device = instance.disks.pop()
7585 24991749 Iustin Pop
        device_idx = len(instance.disks)
7586 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
7587 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
7588 4c4e4e1e Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
7589 e1bc0878 Iustin Pop
          if msg:
7590 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
7591 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
7592 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
7593 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
7594 24991749 Iustin Pop
        # add a new disk
7595 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
7596 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
7597 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
7598 24991749 Iustin Pop
        else:
7599 24991749 Iustin Pop
          file_driver = file_path = None
7600 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
7601 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
7602 24991749 Iustin Pop
                                         instance.disk_template,
7603 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
7604 24991749 Iustin Pop
                                         instance.secondary_nodes,
7605 24991749 Iustin Pop
                                         [disk_dict],
7606 24991749 Iustin Pop
                                         file_path,
7607 24991749 Iustin Pop
                                         file_driver,
7608 24991749 Iustin Pop
                                         disk_idx_base)[0]
7609 24991749 Iustin Pop
        instance.disks.append(new_disk)
7610 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
7611 24991749 Iustin Pop
7612 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
7613 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
7614 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
7615 24991749 Iustin Pop
        #HARDCODE
7616 428958aa Iustin Pop
        for node in instance.all_nodes:
7617 428958aa Iustin Pop
          f_create = node == instance.primary_node
7618 796cab27 Iustin Pop
          try:
7619 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
7620 428958aa Iustin Pop
                            f_create, info, f_create)
7621 1492cca7 Iustin Pop
          except errors.OpExecError, err:
7622 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
7623 428958aa Iustin Pop
                            " node %s: %s",
7624 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
7625 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
7626 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
7627 24991749 Iustin Pop
      else:
7628 24991749 Iustin Pop
        # change a given disk
7629 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
7630 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
7631 24991749 Iustin Pop
    # NIC changes
7632 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
7633 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
7634 24991749 Iustin Pop
        # remove the last nic
7635 24991749 Iustin Pop
        del instance.nics[-1]
7636 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
7637 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
7638 5c44da6a Guido Trotter
        # mac and bridge should be set by now
7639 5c44da6a Guido Trotter
        mac = nic_dict['mac']
7640 cd098c41 Guido Trotter
        ip = nic_dict.get('ip', None)
7641 cd098c41 Guido Trotter
        nicparams = self.nic_pinst[constants.DDM_ADD]
7642 cd098c41 Guido Trotter
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
7643 24991749 Iustin Pop
        instance.nics.append(new_nic)
7644 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
7645 cd098c41 Guido Trotter
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
7646 cd098c41 Guido Trotter
                       (new_nic.mac, new_nic.ip,
7647 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
7648 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
7649 cd098c41 Guido Trotter
                       )))
7650 24991749 Iustin Pop
      else:
7651 cd098c41 Guido Trotter
        for key in 'mac', 'ip':
7652 24991749 Iustin Pop
          if key in nic_dict:
7653 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
7654 cd098c41 Guido Trotter
        if nic_op in self.nic_pnew:
7655 cd098c41 Guido Trotter
          instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
7656 cd098c41 Guido Trotter
        for key, val in nic_dict.iteritems():
7657 cd098c41 Guido Trotter
          result.append(("nic.%s/%d" % (key, nic_op), val))
7658 24991749 Iustin Pop
7659 24991749 Iustin Pop
    # hvparams changes
7660 74409b12 Iustin Pop
    if self.op.hvparams:
7661 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
7662 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
7663 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
7664 24991749 Iustin Pop
7665 24991749 Iustin Pop
    # beparams changes
7666 338e51e8 Iustin Pop
    if self.op.beparams:
7667 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
7668 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
7669 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
7670 a8083063 Iustin Pop
7671 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
7672 a8083063 Iustin Pop
7673 a8083063 Iustin Pop
    return result
7674 a8083063 Iustin Pop
7675 a8083063 Iustin Pop
7676 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
7677 a8083063 Iustin Pop
  """Query the exports list
7678 a8083063 Iustin Pop

7679 a8083063 Iustin Pop
  """
7680 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
7681 21a15682 Guido Trotter
  REQ_BGL = False
7682 21a15682 Guido Trotter
7683 21a15682 Guido Trotter
  def ExpandNames(self):
7684 21a15682 Guido Trotter
    self.needed_locks = {}
7685 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
7686 21a15682 Guido Trotter
    if not self.op.nodes:
7687 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7688 21a15682 Guido Trotter
    else:
7689 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
7690 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
7691 a8083063 Iustin Pop
7692 a8083063 Iustin Pop
  def CheckPrereq(self):
7693 21a15682 Guido Trotter
    """Check prerequisites.
7694 a8083063 Iustin Pop

7695 a8083063 Iustin Pop
    """
7696 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
7697 a8083063 Iustin Pop
7698 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7699 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
7700 a8083063 Iustin Pop

7701 e4376078 Iustin Pop
    @rtype: dict
7702 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
7703 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
7704 e4376078 Iustin Pop
        that node.
7705 a8083063 Iustin Pop

7706 a8083063 Iustin Pop
    """
7707 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
7708 b04285f2 Guido Trotter
    result = {}
7709 b04285f2 Guido Trotter
    for node in rpcresult:
7710 4c4e4e1e Iustin Pop
      if rpcresult[node].fail_msg:
7711 b04285f2 Guido Trotter
        result[node] = False
7712 b04285f2 Guido Trotter
      else:
7713 1b7bfbb7 Iustin Pop
        result[node] = rpcresult[node].payload
7714 b04285f2 Guido Trotter
7715 b04285f2 Guido Trotter
    return result
7716 a8083063 Iustin Pop
7717 a8083063 Iustin Pop
7718 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
7719 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
7720 a8083063 Iustin Pop

7721 a8083063 Iustin Pop
  """
7722 a8083063 Iustin Pop
  HPATH = "instance-export"
7723 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7724 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
7725 6657590e Guido Trotter
  REQ_BGL = False
7726 6657590e Guido Trotter
7727 17c3f802 Guido Trotter
  def CheckArguments(self):
7728 17c3f802 Guido Trotter
    """Check the arguments.
7729 17c3f802 Guido Trotter

7730 17c3f802 Guido Trotter
    """
7731 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
7732 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
7733 17c3f802 Guido Trotter
7734 6657590e Guido Trotter
  def ExpandNames(self):
7735 6657590e Guido Trotter
    self._ExpandAndLockInstance()
7736 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
7737 6657590e Guido Trotter
    #
7738 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
7739 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
7740 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
7741 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
7742 6657590e Guido Trotter
    #    then one to remove, after
7743 5bbd3f7f Michael Hanselmann
    #  - removing the removal operation altogether
7744 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7745 6657590e Guido Trotter
7746 6657590e Guido Trotter
  def DeclareLocks(self, level):
7747 6657590e Guido Trotter
    """Last minute lock declaration."""
7748 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
7749 a8083063 Iustin Pop
7750 a8083063 Iustin Pop
  def BuildHooksEnv(self):
7751 a8083063 Iustin Pop
    """Build hooks env.
7752 a8083063 Iustin Pop

7753 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
7754 a8083063 Iustin Pop

7755 a8083063 Iustin Pop
    """
7756 a8083063 Iustin Pop
    env = {
7757 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
7758 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
7759 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
7760 a8083063 Iustin Pop
      }
7761 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7762 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
7763 a8083063 Iustin Pop
          self.op.target_node]
7764 a8083063 Iustin Pop
    return env, nl, nl
7765 a8083063 Iustin Pop
7766 a8083063 Iustin Pop
  def CheckPrereq(self):
7767 a8083063 Iustin Pop
    """Check prerequisites.
7768 a8083063 Iustin Pop

7769 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
7770 a8083063 Iustin Pop

7771 a8083063 Iustin Pop
    """
7772 6657590e Guido Trotter
    instance_name = self.op.instance_name
7773 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
7774 6657590e Guido Trotter
    assert self.instance is not None, \
7775 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
7776 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
7777 a8083063 Iustin Pop
7778 6657590e Guido Trotter
    self.dst_node = self.cfg.GetNodeInfo(
7779 6657590e Guido Trotter
      self.cfg.ExpandNodeName(self.op.target_node))
7780 a8083063 Iustin Pop
7781 268b8e42 Iustin Pop
    if self.dst_node is None:
7782 268b8e42 Iustin Pop
      # This is wrong node name, not a non-locked node
7783 268b8e42 Iustin Pop
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
7784 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
7785 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
7786 a8083063 Iustin Pop
7787 b6023d6c Manuel Franceschini
    # instance disk type verification
7788 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
7789 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
7790 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
7791 b6023d6c Manuel Franceschini
                                   " file-based disks")
7792 b6023d6c Manuel Franceschini
7793 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7794 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
7795 a8083063 Iustin Pop

7796 a8083063 Iustin Pop
    """
7797 a8083063 Iustin Pop
    instance = self.instance
7798 a8083063 Iustin Pop
    dst_node = self.dst_node
7799 a8083063 Iustin Pop
    src_node = instance.primary_node
7800 37972df0 Michael Hanselmann
7801 a8083063 Iustin Pop
    if self.op.shutdown:
7802 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
7803 37972df0 Michael Hanselmann
      feedback_fn("Shutting down instance %s" % instance.name)
7804 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(src_node, instance,
7805 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
7806 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance %s on"
7807 4c4e4e1e Iustin Pop
                   " node %s" % (instance.name, src_node))
7808 a8083063 Iustin Pop
7809 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
7810 a8083063 Iustin Pop
7811 a8083063 Iustin Pop
    snap_disks = []
7812 a8083063 Iustin Pop
7813 998c712c Iustin Pop
    # set the disks ID correctly since call_instance_start needs the
7814 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
7815 998c712c Iustin Pop
    for disk in instance.disks:
7816 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
7817 998c712c Iustin Pop
7818 084f05a5 Iustin Pop
    # per-disk results
7819 084f05a5 Iustin Pop
    dresults = []
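    # (one boolean per disk: True if that disk's snapshot was exported
    # successfully, False otherwise)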
7820 a8083063 Iustin Pop
    try:
7821 a97da6b7 Iustin Pop
      for idx, disk in enumerate(instance.disks):
7822 37972df0 Michael Hanselmann
        feedback_fn("Creating a snapshot of disk/%s on node %s" %
7823 37972df0 Michael Hanselmann
                    (idx, src_node))
7824 37972df0 Michael Hanselmann
7825 87812fd3 Iustin Pop
        # result.payload will be a snapshot of an lvm leaf of the one we passed
7826 87812fd3 Iustin Pop
        result = self.rpc.call_blockdev_snapshot(src_node, disk)
7827 4c4e4e1e Iustin Pop
        msg = result.fail_msg
7828 87812fd3 Iustin Pop
        if msg:
7829 af0413bb Guido Trotter
          self.LogWarning("Could not snapshot disk/%s on node %s: %s",
7830 af0413bb Guido Trotter
                          idx, src_node, msg)
7831 19d7f90a Guido Trotter
          snap_disks.append(False)
7832 19d7f90a Guido Trotter
        else:
7833 87812fd3 Iustin Pop
          disk_id = (vgname, result.payload)
7834 19d7f90a Guido Trotter
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
7835 87812fd3 Iustin Pop
                                 logical_id=disk_id, physical_id=disk_id,
7836 19d7f90a Guido Trotter
                                 iv_name=disk.iv_name)
7837 19d7f90a Guido Trotter
          snap_disks.append(new_dev)
7838 a8083063 Iustin Pop
7839 a8083063 Iustin Pop
    finally:
7840 0d68c45d Iustin Pop
      if self.op.shutdown and instance.admin_up:
7841 37972df0 Michael Hanselmann
        feedback_fn("Starting instance %s" % instance.name)
7842 0eca8e0c Iustin Pop
        result = self.rpc.call_instance_start(src_node, instance, None, None)
7843 4c4e4e1e Iustin Pop
        msg = result.fail_msg
7844 dd279568 Iustin Pop
        if msg:
7845 b9bddb6b Iustin Pop
          _ShutdownInstanceDisks(self, instance)
7846 dd279568 Iustin Pop
          raise errors.OpExecError("Could not start instance: %s" % msg)
7847 a8083063 Iustin Pop
7848 a8083063 Iustin Pop
    # TODO: check for size
7849 a8083063 Iustin Pop
7850 62c9ec92 Iustin Pop
    cluster_name = self.cfg.GetClusterName()
7851 74c47259 Iustin Pop
    for idx, dev in enumerate(snap_disks):
7852 37972df0 Michael Hanselmann
      feedback_fn("Exporting snapshot %s from %s to %s" %
7853 37972df0 Michael Hanselmann
                  (idx, src_node, dst_node.name))
7854 19d7f90a Guido Trotter
      if dev:
7855 781de953 Iustin Pop
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
7856 781de953 Iustin Pop
                                               instance, cluster_name, idx)
7857 4c4e4e1e Iustin Pop
        msg = result.fail_msg
7858 ba55d062 Iustin Pop
        if msg:
7859 af0413bb Guido Trotter
          self.LogWarning("Could not export disk/%s from node %s to"
7860 af0413bb Guido Trotter
                          " node %s: %s", idx, src_node, dst_node.name, msg)
7861 084f05a5 Iustin Pop
          dresults.append(False)
7862 084f05a5 Iustin Pop
        else:
7863 084f05a5 Iustin Pop
          dresults.append(True)
7864 4c4e4e1e Iustin Pop
        msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
7865 e1bc0878 Iustin Pop
        if msg:
7866 a97da6b7 Iustin Pop
          self.LogWarning("Could not remove snapshot for disk/%d from node"
7867 a97da6b7 Iustin Pop
                          " %s: %s", idx, src_node, msg)
7868 084f05a5 Iustin Pop
      else:
7869 084f05a5 Iustin Pop
        dresults.append(False)
7870 a8083063 Iustin Pop
7871 37972df0 Michael Hanselmann
    feedback_fn("Finalizing export on %s" % dst_node.name)
7872 781de953 Iustin Pop
    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
7873 084f05a5 Iustin Pop
    fin_resu = True
7874 4c4e4e1e Iustin Pop
    msg = result.fail_msg
7875 9b201a0d Iustin Pop
    if msg:
7876 9b201a0d Iustin Pop
      self.LogWarning("Could not finalize export for instance %s"
7877 9b201a0d Iustin Pop
                      " on node %s: %s", instance.name, dst_node.name, msg)
7878 084f05a5 Iustin Pop
      fin_resu = False
7879 a8083063 Iustin Pop
7880 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
7881 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
7882 a8083063 Iustin Pop
7883 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
7884 a8083063 Iustin Pop
    # if we proceed, the backup would be removed because OpQueryExports
7885 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
7886 35fbcd11 Iustin Pop
    iname = instance.name
7887 a8083063 Iustin Pop
    if nodelist:
7888 37972df0 Michael Hanselmann
      feedback_fn("Removing old exports for instance %s" % iname)
7889 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
7890 a8083063 Iustin Pop
      for node in exportlist:
7891 4c4e4e1e Iustin Pop
        if exportlist[node].fail_msg:
7892 781de953 Iustin Pop
          continue
7893 35fbcd11 Iustin Pop
        if iname in exportlist[node].payload:
7894 4c4e4e1e Iustin Pop
          msg = self.rpc.call_export_remove(node, iname).fail_msg
7895 35fbcd11 Iustin Pop
          if msg:
7896 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
7897 35fbcd11 Iustin Pop
                            " on node %s: %s", iname, node, msg)
7898 084f05a5 Iustin Pop
    return fin_resu, dresults
7899 5c947f38 Iustin Pop
7900 5c947f38 Iustin Pop
7901 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
7902 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
7903 9ac99fda Guido Trotter

7904 9ac99fda Guido Trotter
  """
7905 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
7906 3656b3af Guido Trotter
  REQ_BGL = False
7907 3656b3af Guido Trotter
7908 3656b3af Guido Trotter
  def ExpandNames(self):
7909 3656b3af Guido Trotter
    self.needed_locks = {}
7910 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
7911 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
7912 3656b3af Guido Trotter
    # we can remove exports also for a removed instance)
7913 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7914 9ac99fda Guido Trotter
7915 9ac99fda Guido Trotter
  def CheckPrereq(self):
7916 9ac99fda Guido Trotter
    """Check prerequisites.
7917 9ac99fda Guido Trotter
    """
7918 9ac99fda Guido Trotter
    pass
7919 9ac99fda Guido Trotter
7920 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
7921 9ac99fda Guido Trotter
    """Remove any export.
7922 9ac99fda Guido Trotter

7923 9ac99fda Guido Trotter
    """
7924 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
7925 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
7926 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
7927 9ac99fda Guido Trotter
    fqdn_warn = False
7928 9ac99fda Guido Trotter
    if not instance_name:
7929 9ac99fda Guido Trotter
      fqdn_warn = True
7930 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
7931 9ac99fda Guido Trotter
7932 1b7bfbb7 Iustin Pop
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
7933 1b7bfbb7 Iustin Pop
    exportlist = self.rpc.call_export_list(locked_nodes)
7934 9ac99fda Guido Trotter
    found = False
7935 9ac99fda Guido Trotter
    for node in exportlist:
7936 4c4e4e1e Iustin Pop
      msg = exportlist[node].fail_msg
7937 1b7bfbb7 Iustin Pop
      if msg:
7938 1b7bfbb7 Iustin Pop
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
7939 781de953 Iustin Pop
        continue
7940 1b7bfbb7 Iustin Pop
      if instance_name in exportlist[node].payload:
7941 9ac99fda Guido Trotter
        found = True
7942 781de953 Iustin Pop
        result = self.rpc.call_export_remove(node, instance_name)
7943 4c4e4e1e Iustin Pop
        msg = result.fail_msg
7944 35fbcd11 Iustin Pop
        if msg:
7945 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
7946 35fbcd11 Iustin Pop
                        " on node %s: %s", instance_name, node, msg)
7947 9ac99fda Guido Trotter
7948 9ac99fda Guido Trotter
    if fqdn_warn and not found:
7949 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
7950 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
7951 9ac99fda Guido Trotter
                  " Domain Name.")
7952 9ac99fda Guido Trotter
7953 9ac99fda Guido Trotter
7954 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
7955 5c947f38 Iustin Pop
  """Generic tags LU.
7956 5c947f38 Iustin Pop

7957 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
7958 5c947f38 Iustin Pop

7959 5c947f38 Iustin Pop
  """
7960 5c947f38 Iustin Pop
7961 8646adce Guido Trotter
  def ExpandNames(self):
7962 8646adce Guido Trotter
    self.needed_locks = {}
7963 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
7964 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
7965 5c947f38 Iustin Pop
      if name is None:
7966 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
7967 3ecf6786 Iustin Pop
                                   (self.op.name,))
7968 5c947f38 Iustin Pop
      self.op.name = name
7969 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = name
7970 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
7971 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
7972 5c947f38 Iustin Pop
      if name is None:
7973 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
7974 3ecf6786 Iustin Pop
                                   (self.op.name,))
7975 5c947f38 Iustin Pop
      self.op.name = name
7976 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = name
7977 8646adce Guido Trotter
7978 8646adce Guido Trotter
  def CheckPrereq(self):
7979 8646adce Guido Trotter
    """Check prerequisites.
7980 8646adce Guido Trotter

7981 8646adce Guido Trotter
    """
7982 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
7983 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
7984 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
7985 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
7986 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
7987 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
7988 5c947f38 Iustin Pop
    else:
7989 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
7990 3ecf6786 Iustin Pop
                                 str(self.op.kind))
7991 5c947f38 Iustin Pop
7992 5c947f38 Iustin Pop
7993 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
7994 5c947f38 Iustin Pop
  """Returns the tags of a given object.
7995 5c947f38 Iustin Pop

7996 5c947f38 Iustin Pop
  """
7997 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
7998 8646adce Guido Trotter
  REQ_BGL = False
7999 5c947f38 Iustin Pop
8000 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8001 5c947f38 Iustin Pop
    """Returns the tag list.
8002 5c947f38 Iustin Pop

8003 5c947f38 Iustin Pop
    """
8004 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
8005 5c947f38 Iustin Pop
8006 5c947f38 Iustin Pop
8007 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
8008 73415719 Iustin Pop
  """Searches the tags for a given pattern.
8009 73415719 Iustin Pop

8010 73415719 Iustin Pop
  """
8011 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
8012 8646adce Guido Trotter
  REQ_BGL = False
8013 8646adce Guido Trotter
8014 8646adce Guido Trotter
  def ExpandNames(self):
8015 8646adce Guido Trotter
    self.needed_locks = {}
8016 73415719 Iustin Pop
8017 73415719 Iustin Pop
  def CheckPrereq(self):
8018 73415719 Iustin Pop
    """Check prerequisites.
8019 73415719 Iustin Pop

8020 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
8021 73415719 Iustin Pop

8022 73415719 Iustin Pop
    """
8023 73415719 Iustin Pop
    try:
8024 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
8025 73415719 Iustin Pop
    except re.error, err:
8026 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
8027 73415719 Iustin Pop
                                 (self.op.pattern, err))
8028 73415719 Iustin Pop
8029 73415719 Iustin Pop
  def Exec(self, feedback_fn):
8030 73415719 Iustin Pop
    """Returns the tag list.
8031 73415719 Iustin Pop

8032 73415719 Iustin Pop
    """
8033 73415719 Iustin Pop
    cfg = self.cfg
8034 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
8035 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
8036 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
8037 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
8038 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
8039 73415719 Iustin Pop
    results = []
8040 73415719 Iustin Pop
    for path, target in tgts:
8041 73415719 Iustin Pop
      for tag in target.GetTags():
8042 73415719 Iustin Pop
        if self.re.search(tag):
8043 73415719 Iustin Pop
          results.append((path, tag))
8044 73415719 Iustin Pop
    return results
8045 73415719 Iustin Pop
8046 73415719 Iustin Pop
8047 f27302fa Iustin Pop
class LUAddTags(TagsLU):
8048 5c947f38 Iustin Pop
  """Sets a tag on a given object.
8049 5c947f38 Iustin Pop

8050 5c947f38 Iustin Pop
  """
8051 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
8052 8646adce Guido Trotter
  REQ_BGL = False
8053 5c947f38 Iustin Pop
8054 5c947f38 Iustin Pop
  def CheckPrereq(self):
8055 5c947f38 Iustin Pop
    """Check prerequisites.
8056 5c947f38 Iustin Pop

8057 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
8058 5c947f38 Iustin Pop

8059 5c947f38 Iustin Pop
    """
8060 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
8061 f27302fa Iustin Pop
    for tag in self.op.tags:
8062 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
8063 5c947f38 Iustin Pop
8064 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8065 5c947f38 Iustin Pop
    """Sets the tag.
8066 5c947f38 Iustin Pop

8067 5c947f38 Iustin Pop
    """
8068 5c947f38 Iustin Pop
    try:
8069 f27302fa Iustin Pop
      for tag in self.op.tags:
8070 f27302fa Iustin Pop
        self.target.AddTag(tag)
8071 5c947f38 Iustin Pop
    except errors.TagError, err:
8072 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
8073 5c947f38 Iustin Pop
    try:
8074 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
8075 5c947f38 Iustin Pop
    except errors.ConfigurationError:
8076 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
8077 3ecf6786 Iustin Pop
                                " config file and the operation has been"
8078 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
8079 5c947f38 Iustin Pop
8080 5c947f38 Iustin Pop
8081 f27302fa Iustin Pop
class LUDelTags(TagsLU):
8082 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
8083 5c947f38 Iustin Pop

8084 5c947f38 Iustin Pop
  """
8085 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
8086 8646adce Guido Trotter
  REQ_BGL = False
8087 5c947f38 Iustin Pop
8088 5c947f38 Iustin Pop
  def CheckPrereq(self):
8089 5c947f38 Iustin Pop
    """Check prerequisites.
8090 5c947f38 Iustin Pop

8091 5c947f38 Iustin Pop
    This checks that we have the given tag.
8092 5c947f38 Iustin Pop

8093 5c947f38 Iustin Pop
    """
8094 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
8095 f27302fa Iustin Pop
    for tag in self.op.tags:
8096 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
8097 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
8098 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
8099 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
8100 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
8101 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
8102 f27302fa Iustin Pop
      diff_names.sort()
8103 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
8104 f27302fa Iustin Pop
                                 (",".join(diff_names)))
8105 5c947f38 Iustin Pop
8106 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8107 5c947f38 Iustin Pop
    """Remove the tag from the object.
8108 5c947f38 Iustin Pop

8109 5c947f38 Iustin Pop
    """
8110 f27302fa Iustin Pop
    for tag in self.op.tags:
8111 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
8112 5c947f38 Iustin Pop
    try:
8113 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
8114 5c947f38 Iustin Pop
    except errors.ConfigurationError:
8115 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
8116 3ecf6786 Iustin Pop
                                " config file and the operation has been"
8117 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
8118 06009e27 Iustin Pop
8119 0eed6e61 Guido Trotter
8120 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
8121 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
8122 06009e27 Iustin Pop

8123 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
8124 06009e27 Iustin Pop
  time.
8125 06009e27 Iustin Pop

8126 06009e27 Iustin Pop
  """
8127 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
8128 fbe9022f Guido Trotter
  REQ_BGL = False
8129 06009e27 Iustin Pop
8130 fbe9022f Guido Trotter
  def ExpandNames(self):
8131 fbe9022f Guido Trotter
    """Expand names and set required locks.
8132 06009e27 Iustin Pop

8133 fbe9022f Guido Trotter
    This expands the node list, if any.
8134 06009e27 Iustin Pop

8135 06009e27 Iustin Pop
    """
8136 fbe9022f Guido Trotter
    self.needed_locks = {}
8137 06009e27 Iustin Pop
    if self.op.on_nodes:
8138 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
8139 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
8140 fbe9022f Guido Trotter
      # more information.
8141 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
8142 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
8143 fbe9022f Guido Trotter
8144 fbe9022f Guido Trotter
  def CheckPrereq(self):
8145 fbe9022f Guido Trotter
    """Check prerequisites.
8146 fbe9022f Guido Trotter

8147 fbe9022f Guido Trotter
    """
8148 06009e27 Iustin Pop
8149 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
8150 06009e27 Iustin Pop
    """Do the actual sleep.
8151 06009e27 Iustin Pop

8152 06009e27 Iustin Pop
    """
8153 06009e27 Iustin Pop
    if self.op.on_master:
8154 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
8155 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
8156 06009e27 Iustin Pop
    if self.op.on_nodes:
8157 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
8158 06009e27 Iustin Pop
      for node, node_result in result.items():
8159 4c4e4e1e Iustin Pop
        node_result.Raise("Failure during rpc call to node %s" % node)
8160 d61df03e Iustin Pop
8161 d61df03e Iustin Pop
8162 d1c2dd75 Iustin Pop
class IAllocator(object):
8163 d1c2dd75 Iustin Pop
  """IAllocator framework.
8164 d61df03e Iustin Pop

8165 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
8166 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
8167 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
8168 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text) that represent the
8169 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
8170 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
8171 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
8172 d1c2dd75 Iustin Pop
      easy usage
8173 d61df03e Iustin Pop

8174 d61df03e Iustin Pop
  """
8175 29859cb7 Iustin Pop
  _ALLO_KEYS = [
8176 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
8177 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
8178 d1c2dd75 Iustin Pop
    ]
8179 29859cb7 Iustin Pop
  _RELO_KEYS = [
8180 29859cb7 Iustin Pop
    "relocate_from",
8181 29859cb7 Iustin Pop
    ]
8182 d1c2dd75 Iustin Pop
8183 923ddac0 Michael Hanselmann
  def __init__(self, cfg, rpc, mode, name, **kwargs):
8184 923ddac0 Michael Hanselmann
    self.cfg = cfg
8185 923ddac0 Michael Hanselmann
    self.rpc = rpc
8186 d1c2dd75 Iustin Pop
    # init buffer variables
8187 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
8188 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
8189 29859cb7 Iustin Pop
    self.mode = mode
8190 29859cb7 Iustin Pop
    self.name = name
8191 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
8192 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
8193 a0add446 Iustin Pop
    self.hypervisor = None
8194 29859cb7 Iustin Pop
    self.relocate_from = None
8195 27579978 Iustin Pop
    # computed fields
8196 27579978 Iustin Pop
    self.required_nodes = None
8197 d1c2dd75 Iustin Pop
    # init result fields
8198 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
8199 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
8200 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
8201 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
8202 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
8203 29859cb7 Iustin Pop
    else:
8204 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
8205 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
8206 d1c2dd75 Iustin Pop
    for key in kwargs:
8207 29859cb7 Iustin Pop
      if key not in keyset:
8208 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
8209 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
8210 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
8211 29859cb7 Iustin Pop
    for key in keyset:
8212 d1c2dd75 Iustin Pop
      if key not in kwargs:
8213 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
8214 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
8215 d1c2dd75 Iustin Pop
    self._BuildInputData()
8216 d1c2dd75 Iustin Pop
8217 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = objects.FillDict(
            cluster_info.nicparams[constants.PP_DEFAULT],
            nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

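  # The dict built above has roughly the following shape (all values here
  # are made-up examples; the real ones come from the cluster configuration
  # and the node RPC calls):
  #
  #   {"version": constants.IALLOCATOR_VERSION,
  #    "cluster_name": "cluster.example.com",
  #    "cluster_tags": [],
  #    "enabled_hypervisors": ["xen-pvm"],
  #    "nodes": {
  #      "node1.example.com": {
  #        "tags": [], "offline": False, "drained": False,
  #        "master_candidate": True,
  #        "primary_ip": "192.0.2.1", "secondary_ip": "192.0.2.101",
  #        # the dynamic values below are only filled in for nodes that
  #        # are neither offline nor drained:
  #        "total_memory": 4096, "reserved_memory": 512,
  #        "free_memory": 3072, "total_disk": 102400, "free_disk": 51200,
  #        "total_cpus": 4, "i_pri_memory": 1024, "i_pri_up_memory": 512,
  #        },
  #      },
  #    "instances": {
  #      "instance1.example.com": {
  #        "tags": [], "admin_up": True, "vcpus": 1, "memory": 512,
  #        "os": "debootstrap", "nodes": ["node1.example.com"],
  #        "nics": [{"mac": "aa:00:00:00:00:01", "ip": None,
  #                  "mode": "bridged", "link": "xen-br0",
  #                  "bridge": "xen-br0"}],
  #        "disks": [{"size": 1024, "mode": "w"}],
  #        "disk_template": "plain", "disk_space_total": 1024,
  #        "hypervisor": "xen-pvm",
  #        },
  #      },
  #    }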
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

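  # For an allocation run the "request" member added above looks roughly
  # like this (made-up values, matching the attributes set on the object):
  #
  #   "request": {
  #     "type": "allocate",
  #     "name": "instance1.example.com",
  #     "disk_template": "plain",
  #     "tags": [],
  #     "os": "debootstrap",
  #     "vcpus": 1,
  #     "memory": 512,
  #     "disks": [{"size": 1024, "mode": "w"}],
  #     "disk_space_total": 1024,
  #     "nics": [{"mac": "auto", "ip": None, "bridge": None}],
  #     "required_nodes": 1,
  #     }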
  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

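  # The relocation request is much smaller (again, made-up values):
  #
  #   "request": {
  #     "type": "relocate",
  #     "name": "instance1.example.com",
  #     "disk_space_total": 1152,  # disk sizes plus any overhead added by
  #                                # _ComputeDiskSize for the disk template
  #     "required_nodes": 1,
  #     "relocate_from": ["node2.example.com"],
  #     }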
  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and store the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

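  # The call_fn parameter above exists mainly so that tests can inject a
  # stub instead of the real self.rpc.call_iallocator_runner.  The stub only
  # has to mimic what Run() actually uses: an object with a .Raise(message)
  # method and a .payload attribute holding the allocator's output text.
  # A rough sketch (FakeRpcResult is a hypothetical test helper, not part of
  # this module):
  #
  #   def fake_iallocator_runner(master_node, name, in_text):
  #     return FakeRpcResult(payload='{"success": true, "info": "",'
  #                                  ' "nodes": ["node1.example.com"]}')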
  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict


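# The text returned by a successful allocator run and parsed above is
# expected to decode into a dict carrying at least the three keys checked
# in _ValidateResult, e.g. (made-up values):
#
#   {"success": True,
#    "info": "allocation successful",
#    "nodes": ["node1.example.com", "node3.example.com"]}
#
# "success", "info" and "nodes" become attributes of the IAllocator object
# and the whole dict is kept in self.out_data.
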
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode of
    the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

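  # Per the checks above, an allocation-mode test opcode is expected to
  # carry 'nics' and 'disks' lists shaped like the following (values are
  # only an illustration):
  #
  #   nics  = [{"mac": "auto", "ip": None, "bridge": "xen-br0"}]
  #   disks = [{"size": 1024, "mode": "w"}]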
  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result
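
# A minimal sketch of how a logical unit drives IAllocator end to end,
# mirroring the Exec method above; the allocator name "dumb" is only a
# placeholder for whatever script is installed on the cluster:
#
#   ial = IAllocator(self.cfg, self.rpc,
#                    mode=constants.IALLOCATOR_MODE_RELOC,
#                    name=instance.name,
#                    relocate_from=list(instance.secondary_nodes))
#   ial.Run("dumb")
#   if not ial.success:
#     raise errors.OpPrereqError("Can't compute nodes using iallocator:"
#                                " %s" % ial.info)
#   new_nodes = ial.nodes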