#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import time
import re
import platform
import logging
import copy

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo
    self.LogStep = processor.LogStep
    # support for dry-run
    self.dry_run_result = None

    # Tasklets
    self.tasklets = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name, errors.ECODE_INVAL)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name, errors.ECODE_NOENT)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instances' nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError
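
# Illustrative sketch only (all names hypothetical): as described in
# LogicalUnit.ExpandNames, an LU can be built entirely from tasklets by
# assigning self.tasklets there; the base class CheckPrereq and Exec then
# run each tasklet in order, so the LU does not need to implement them.
#
#   class _DoSomethingTasklet(Tasklet):
#     def CheckPrereq(self):
#       pass                        # validate; raise errors.OpPrereqError
#
#     def Exec(self, feedback_fn):
#       feedback_fn("tasklet running")
#
#   class LUDoSomething(NoHooksLU):
#     _OP_REQP = []
#     REQ_BGL = False
#
#     def ExpandNames(self):
#       self.needed_locks = {}
#       self.tasklets = [_DoSomethingTasklet(self)]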


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
                               errors.ECODE_INVAL)

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name,
                                 errors.ECODE_NOENT)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'",
                               errors.ECODE_INVAL)

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name,
                                   errors.ECODE_NOENT)
      wanted.append(instance)

  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)
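
# Illustrative use only (attribute and opcode field names are hypothetical):
# a query LU would typically keep two utils.FieldSet class attributes and
# validate the user-selected output fields with
#
#   _CheckOutputFields(static=self._FIELDS_STATIC,
#                      dynamic=self._FIELDS_DYNAMIC,
#                      selected=self.op.output_fields)
#
# which raises errors.OpPrereqError naming every selected field that appears
# in neither set.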


def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)), errors.ECODE_INVAL)
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_INVAL)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
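
# For illustration only (values are hypothetical): for a running instance
# with one bridged NIC and one disk, the dict built above would contain,
# among others, entries such as
#
#   OP_TARGET=inst1.example.com        INSTANCE_NAME=inst1.example.com
#   INSTANCE_PRIMARY=node1.example.com INSTANCE_SECONDARIES=""
#   INSTANCE_STATUS=up                 INSTANCE_NIC_COUNT=1
#   INSTANCE_NIC0_MODE=bridged         INSTANCE_NIC0_LINK=xen-br0
#   INSTANCE_NIC0_BRIDGE=xen-br0       INSTANCE_DISK_COUNT=1
#   INSTANCE_DISK0_SIZE=10240          INSTANCE_DISK0_MODE=rw
#
# plus one INSTANCE_BE_* entry per backend parameter and one INSTANCE_HV_*
# entry per hypervisor parameter; per BuildHooksEnv's contract, the hooks
# runner later adds the GANETI_ prefix to these keys.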


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max by one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should
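
# Worked example with hypothetical numbers: if candidate_pool_size is 10 and
# GetMasterCandidateStats reports mc_now=3, mc_should=4, the node about to be
# added raises the target to min(4 + 1, 10) = 5, and 3 < 5 means it should
# promote itself; with a pool size of 4 and mc_now=mc_should=4, the target
# stays at min(4 + 1, 4) = 4, so no promotion happens.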


def _CheckNicsBridgesExist(lu, target_nics, target_node,
                               profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
                for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  try:
    variant = name.split("+", 1)[1]
  except IndexError:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
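
# For illustration: an OS that declares supported variants must be requested
# as "<name>+<variant>", e.g. a hypothetical "debian-image+lenny"; the part
# after the first "+" has to appear in the OS object's supported_variants
# list, otherwise errors.OpPrereqError is raised above. An OS without
# declared variants is accepted under its plain name.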


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
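  # Query node_name for the mirror status of the instance's disks and return
  # the indices (into instance.disks) of those reported as LDS_FAULTY.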
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


class LUPostInitCluster(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def CheckPrereq(self):
    """No prerequisites to check.

    """
    return True

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUDestroyCluster(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except:
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    if modify_ssh_setup:
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      utils.CreateBackup(priv_key)
      utils.CreateBackup(pub_key)

    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
  REQ_BGL = False

  TCLUSTER = "cluster"
  TNODE = "node"
  TINSTANCE = "instance"

  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
  ENODEDRBD = (TNODE, "ENODEDRBD")
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
  ENODEHV = (TNODE, "ENODEHV")
  ENODELVM = (TNODE, "ENODELVM")
  ENODEN1 = (TNODE, "ENODEN1")
  ENODENET = (TNODE, "ENODENET")
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
  ENODERPC = (TNODE, "ENODERPC")
  ENODESSH = (TNODE, "ENODESSH")
  ENODEVERSION = (TNODE, "ENODEVERSION")
  ENODESETUP = (TNODE, "ENODESETUP")

  ETYPE_FIELD = "code"
  ETYPE_ERROR = "ERROR"
  ETYPE_WARNING = "WARNING"

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def _Error(self, ecode, item, msg, *args, **kwargs):
    """Format an error message.

    Based on the opcode's error_codes parameter, either format a
    parseable error code, or a simpler error string.

    This must be called only from Exec and functions called from Exec.

    """
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
    itype, etxt = ecode
    # first complete the msg
    if args:
      msg = msg % args
    # then format the whole message
    if self.op.error_codes:
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
    else:
      if item:
        item = " " + item
      else:
        item = ""
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
    # and finally report it via the feedback_fn
    self._feedback_fn("  - %s" % msg)
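
  # Illustration of the two formats produced above for a hypothetical
  # version mismatch on node node1:
  #   with op.error_codes set:  "ERROR:ENODEVERSION:node:node1:<msg>"
  #   without it:               "ERROR: node node1: <msg>"
  # and either form reaches the caller through feedback_fn, prefixed "  - ".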
969 7c874ee1 Iustin Pop
970 a0c9776a Iustin Pop
  def _ErrorIf(self, cond, *args, **kwargs):
971 a0c9776a Iustin Pop
    """Log an error message if the passed condition is True.
972 a0c9776a Iustin Pop

973 a0c9776a Iustin Pop
    """
974 a0c9776a Iustin Pop
    cond = bool(cond) or self.op.debug_simulate_errors
975 a0c9776a Iustin Pop
    if cond:
976 a0c9776a Iustin Pop
      self._Error(*args, **kwargs)
977 a0c9776a Iustin Pop
    # do not mark the operation as failed for WARN cases only
978 a0c9776a Iustin Pop
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
979 a0c9776a Iustin Pop
      self.bad = self.bad or cond
980 a0c9776a Iustin Pop
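  # Call pattern sketch (made-up names; real calls appear throughout
  # _VerifyNode and Exec below): the condition comes first, then the
  # (type, code) tuple, the offending item and a printf-style message;
  # severity can be downgraded per call, e.g.
  #   _ErrorIf(vg_too_small, self.ENODELVM, node,
  #            "volume group smaller than %dMB", min_size,
  #            code=self.ETYPE_WARNING)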
981 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
982 7c874ee1 Iustin Pop
                  node_result, master_files, drbd_map, vg_name):
983 a8083063 Iustin Pop
    """Run multiple tests against a node.
984 a8083063 Iustin Pop

985 112f18a5 Iustin Pop
    Test list:
986 e4376078 Iustin Pop

987 a8083063 Iustin Pop
      - compares ganeti version
988 5bbd3f7f Michael Hanselmann
      - checks vg existence and size > 20G
989 a8083063 Iustin Pop
      - checks config file checksum
990 a8083063 Iustin Pop
      - checks ssh to other nodes
991 a8083063 Iustin Pop

992 112f18a5 Iustin Pop
    @type nodeinfo: L{objects.Node}
993 112f18a5 Iustin Pop
    @param nodeinfo: the node to check
994 e4376078 Iustin Pop
    @param file_list: required list of files
995 e4376078 Iustin Pop
    @param local_cksum: dictionary of local files and their checksums
996 e4376078 Iustin Pop
    @param node_result: the results from the node
997 112f18a5 Iustin Pop
    @param master_files: list of files that only masters should have
998 6d2e83d5 Iustin Pop
    @param drbd_map: the used drbd minors for this node, in
999 6d2e83d5 Iustin Pop
        form of minor: (instance, must_exist) which correspond to instances
1000 6d2e83d5 Iustin Pop
        and their running status
1001 cc9e1230 Guido Trotter
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
1002 098c0958 Michael Hanselmann

1003 a8083063 Iustin Pop
    """
1004 112f18a5 Iustin Pop
    node = nodeinfo.name
1005 a0c9776a Iustin Pop
    _ErrorIf = self._ErrorIf
1006 25361b9a Iustin Pop
1007 25361b9a Iustin Pop
    # main result, node_result should be a non-empty dict
1008 a0c9776a Iustin Pop
    test = not node_result or not isinstance(node_result, dict)
1009 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1010 7c874ee1 Iustin Pop
                  "unable to verify node: no data returned")
1011 a0c9776a Iustin Pop
    if test:
1012 a0c9776a Iustin Pop
      return
1013 25361b9a Iustin Pop
1014 a8083063 Iustin Pop
    # compares ganeti version
1015 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
1016 25361b9a Iustin Pop
    remote_version = node_result.get('version', None)
1017 a0c9776a Iustin Pop
    test = not (remote_version and
1018 a0c9776a Iustin Pop
                isinstance(remote_version, (list, tuple)) and
1019 a0c9776a Iustin Pop
                len(remote_version) == 2)
1020 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1021 a0c9776a Iustin Pop
             "connection to node returned invalid data")
1022 a0c9776a Iustin Pop
    if test:
1023 a0c9776a Iustin Pop
      return
1024 a0c9776a Iustin Pop
1025 a0c9776a Iustin Pop
    test = local_version != remote_version[0]
1026 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEVERSION, node,
1027 a0c9776a Iustin Pop
             "incompatible protocol versions: master %s,"
1028 a0c9776a Iustin Pop
             " node %s", local_version, remote_version[0])
1029 a0c9776a Iustin Pop
    if test:
1030 a0c9776a Iustin Pop
      return
1031 a8083063 Iustin Pop
1032 e9ce0a64 Iustin Pop
    # node seems compatible, we can actually try to look into its results
1033 a8083063 Iustin Pop
1034 e9ce0a64 Iustin Pop
    # full package version
1035 a0c9776a Iustin Pop
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1036 a0c9776a Iustin Pop
                  self.ENODEVERSION, node,
1037 7c874ee1 Iustin Pop
                  "software version mismatch: master %s, node %s",
1038 7c874ee1 Iustin Pop
                  constants.RELEASE_VERSION, remote_version[1],
1039 a0c9776a Iustin Pop
                  code=self.ETYPE_WARNING)
1040 e9ce0a64 Iustin Pop
1041 e9ce0a64 Iustin Pop
    # checks vg existence and size > 20G
1042 cc9e1230 Guido Trotter
    if vg_name is not None:
1043 cc9e1230 Guido Trotter
      vglist = node_result.get(constants.NV_VGLIST, None)
1044 a0c9776a Iustin Pop
      test = not vglist
1045 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1046 a0c9776a Iustin Pop
      if not test:
1047 cc9e1230 Guido Trotter
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1048 cc9e1230 Guido Trotter
                                              constants.MIN_VG_SIZE)
1049 a0c9776a Iustin Pop
        _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1050 a8083063 Iustin Pop
1051 a8083063 Iustin Pop
    # checks config file checksum
1052 a8083063 Iustin Pop
1053 25361b9a Iustin Pop
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
1054 a0c9776a Iustin Pop
    test = not isinstance(remote_cksum, dict)
1055 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEFILECHECK, node,
1056 a0c9776a Iustin Pop
             "node hasn't returned file checksum data")
1057 a0c9776a Iustin Pop
    if not test:
1058 a8083063 Iustin Pop
      for file_name in file_list:
1059 112f18a5 Iustin Pop
        node_is_mc = nodeinfo.master_candidate
1060 a0c9776a Iustin Pop
        must_have = (file_name not in master_files) or node_is_mc
1061 a0c9776a Iustin Pop
        # missing
1062 a0c9776a Iustin Pop
        test1 = file_name not in remote_cksum
1063 a0c9776a Iustin Pop
        # invalid checksum
1064 a0c9776a Iustin Pop
        test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1065 a0c9776a Iustin Pop
        # existing and good
1066 a0c9776a Iustin Pop
        test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1067 a0c9776a Iustin Pop
        _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1068 a0c9776a Iustin Pop
                 "file '%s' missing", file_name)
1069 a0c9776a Iustin Pop
        _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1070 a0c9776a Iustin Pop
                 "file '%s' has wrong checksum", file_name)
1071 a0c9776a Iustin Pop
        # not candidate and this is not a must-have file
1072 a0c9776a Iustin Pop
        _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1073 a0c9776a Iustin Pop
                 "file '%s' should not exist on non master"
1074 a0c9776a Iustin Pop
                 " candidates (and the file is outdated)", file_name)
1075 a0c9776a Iustin Pop
        # all good, except non-master/non-must have combination
1076 a0c9776a Iustin Pop
        _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1077 a0c9776a Iustin Pop
                 "file '%s' should not exist"
1078 a0c9776a Iustin Pop
                 " on non master candidates", file_name)
1079 a8083063 Iustin Pop
1080 25361b9a Iustin Pop
    # checks ssh to any
1081 25361b9a Iustin Pop
1082 a0c9776a Iustin Pop
    test = constants.NV_NODELIST not in node_result
1083 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODESSH, node,
1084 a0c9776a Iustin Pop
             "node hasn't returned node ssh connectivity data")
1085 a0c9776a Iustin Pop
    if not test:
1086 25361b9a Iustin Pop
      if node_result[constants.NV_NODELIST]:
1087 7c874ee1 Iustin Pop
        for a_node, a_msg in node_result[constants.NV_NODELIST].items():
1088 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODESSH, node,
1089 a0c9776a Iustin Pop
                   "ssh communication with node '%s': %s", a_node, a_msg)
1090 25361b9a Iustin Pop
1091 a0c9776a Iustin Pop
    test = constants.NV_NODENETTEST not in node_result
1092 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODENET, node,
1093 a0c9776a Iustin Pop
             "node hasn't returned node tcp connectivity data")
1094 a0c9776a Iustin Pop
    if not test:
1095 25361b9a Iustin Pop
      if node_result[constants.NV_NODENETTEST]:
1096 25361b9a Iustin Pop
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
1097 7c874ee1 Iustin Pop
        for anode in nlist:
1098 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODENET, node,
1099 a0c9776a Iustin Pop
                   "tcp communication with node '%s': %s",
1100 a0c9776a Iustin Pop
                   anode, node_result[constants.NV_NODENETTEST][anode])
1101 9d4bfc96 Iustin Pop
1102 25361b9a Iustin Pop
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
1103 e69d05fd Iustin Pop
    if isinstance(hyp_result, dict):
1104 e69d05fd Iustin Pop
      for hv_name, hv_result in hyp_result.iteritems():
1105 a0c9776a Iustin Pop
        test = hv_result is not None
1106 a0c9776a Iustin Pop
        _ErrorIf(test, self.ENODEHV, node,
1107 a0c9776a Iustin Pop
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1108 6d2e83d5 Iustin Pop
1109 6d2e83d5 Iustin Pop
    # check used drbd list
1110 cc9e1230 Guido Trotter
    if vg_name is not None:
1111 cc9e1230 Guido Trotter
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
1112 a0c9776a Iustin Pop
      test = not isinstance(used_minors, (tuple, list))
1113 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1114 a0c9776a Iustin Pop
               "cannot parse drbd status file: %s", str(used_minors))
1115 a0c9776a Iustin Pop
      if not test:
1116 cc9e1230 Guido Trotter
        for minor, (iname, must_exist) in drbd_map.items():
1117 a0c9776a Iustin Pop
          test = minor not in used_minors and must_exist
1118 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODEDRBD, node,
1119 a0c9776a Iustin Pop
                   "drbd minor %d of instance %s is not active",
1120 a0c9776a Iustin Pop
                   minor, iname)
1121 cc9e1230 Guido Trotter
        for minor in used_minors:
1122 a0c9776a Iustin Pop
          test = minor not in drbd_map
1123 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODEDRBD, node,
1124 a0c9776a Iustin Pop
                   "unallocated drbd minor %d is in use", minor)
1125 7c0aa8e9 Iustin Pop
    test = node_result.get(constants.NV_NODESETUP,
1126 7c0aa8e9 Iustin Pop
                           ["Missing NODESETUP results"])
1127 7c0aa8e9 Iustin Pop
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1128 7c0aa8e9 Iustin Pop
             "; ".join(test))
1129 a8083063 Iustin Pop
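  # Assumed shape of node_result (an illustration derived from the keys
  # consumed above and from node_verify_param built in Exec below):
  #   {'version': (protocol_version, release_version),
  #    constants.NV_FILELIST: {filename: checksum, ...},
  #    constants.NV_NODELIST: {failed_node: error_msg, ...},
  #    constants.NV_NODENETTEST: {failed_node: error_msg, ...},
  #    constants.NV_HYPERVISOR: {hv_name: error_or_None, ...},
  #    constants.NV_VGLIST: ..., constants.NV_DRBDLIST: [minor, ...],
  #    constants.NV_NODESETUP: [error_msg, ...]}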
1130 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
1131 7c874ee1 Iustin Pop
                      node_instance, n_offline):
1132 a8083063 Iustin Pop
    """Verify an instance.
1133 a8083063 Iustin Pop

1134 a8083063 Iustin Pop
    This function checks to see if the required block devices are
1135 a8083063 Iustin Pop
    available on the instance's node.
1136 a8083063 Iustin Pop

1137 a8083063 Iustin Pop
    """
1138 a0c9776a Iustin Pop
    _ErrorIf = self._ErrorIf
1139 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
1140 a8083063 Iustin Pop
1141 a8083063 Iustin Pop
    node_vol_should = {}
1142 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
1143 a8083063 Iustin Pop
1144 a8083063 Iustin Pop
    for node in node_vol_should:
1145 0a66c968 Iustin Pop
      if node in n_offline:
1146 0a66c968 Iustin Pop
        # ignore missing volumes on offline nodes
1147 0a66c968 Iustin Pop
        continue
1148 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
1149 a0c9776a Iustin Pop
        test = node not in node_vol_is or volume not in node_vol_is[node]
1150 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1151 a0c9776a Iustin Pop
                 "volume %s missing on node %s", volume, node)
1152 a8083063 Iustin Pop
1153 0d68c45d Iustin Pop
    if instanceconfig.admin_up:
1154 a0c9776a Iustin Pop
      test = ((node_current not in node_instance or
1155 a0c9776a Iustin Pop
               not instance in node_instance[node_current]) and
1156 a0c9776a Iustin Pop
              node_current not in n_offline)
1157 a0c9776a Iustin Pop
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1158 a0c9776a Iustin Pop
               "instance not running on its primary node %s",
1159 a0c9776a Iustin Pop
               node_current)
1160 a8083063 Iustin Pop
1161 a8083063 Iustin Pop
    for node in node_instance:
1162 a8083063 Iustin Pop
      if (not node == node_current):
1163 a0c9776a Iustin Pop
        test = instance in node_instance[node]
1164 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1165 a0c9776a Iustin Pop
                 "instance should not run on node %s", node)
1166 a8083063 Iustin Pop
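  # Shape note (illustration only): node_vol_should and node_vol_is both
  # map a node name to the logical volumes expected or actually found on
  # it; only membership of a volume under a node is tested here and in
  # _VerifyOrphanVolumes below.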
1167 7c874ee1 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is):
1168 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
1169 a8083063 Iustin Pop

1170 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
1171 a8083063 Iustin Pop
    reported as unknown.
1172 a8083063 Iustin Pop

1173 a8083063 Iustin Pop
    """
1174 a8083063 Iustin Pop
    for node in node_vol_is:
1175 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
1176 a0c9776a Iustin Pop
        test = (node not in node_vol_should or
1177 a0c9776a Iustin Pop
                volume not in node_vol_should[node])
1178 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1179 7c874ee1 Iustin Pop
                      "volume %s is unknown", volume)
1180 a8083063 Iustin Pop
1181 7c874ee1 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance):
1182 a8083063 Iustin Pop
    """Verify the list of running instances.
1183 a8083063 Iustin Pop

1184 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
1185 a8083063 Iustin Pop

1186 a8083063 Iustin Pop
    """
1187 a8083063 Iustin Pop
    for node in node_instance:
1188 7c874ee1 Iustin Pop
      for o_inst in node_instance[node]:
1189 a0c9776a Iustin Pop
        test = o_inst not in instancelist
1190 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1191 7c874ee1 Iustin Pop
                      "instance %s on node %s should not exist", o_inst, node)
1192 a8083063 Iustin Pop
1193 7c874ee1 Iustin Pop
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg):
1194 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
1195 2b3b6ddd Guido Trotter

1196 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
1197 2b3b6ddd Guido Trotter
    was primary for.
1198 2b3b6ddd Guido Trotter

1199 2b3b6ddd Guido Trotter
    """
1200 2b3b6ddd Guido Trotter
    for node, nodeinfo in node_info.iteritems():
1201 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
1202 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to should a single
1203 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
1204 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
1205 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
1206 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
1207 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
1208 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
1209 2b3b6ddd Guido Trotter
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
1210 2b3b6ddd Guido Trotter
        needed_mem = 0
1211 2b3b6ddd Guido Trotter
        for instance in instances:
1212 338e51e8 Iustin Pop
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1213 c0f2b229 Iustin Pop
          if bep[constants.BE_AUTO_BALANCE]:
1214 3924700f Iustin Pop
            needed_mem += bep[constants.BE_MEMORY]
1215 a0c9776a Iustin Pop
        test = nodeinfo['mfree'] < needed_mem
1216 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEN1, node,
1217 7c874ee1 Iustin Pop
                      "not enough memory on to accommodate"
1218 7c874ee1 Iustin Pop
                      " failovers should peer node %s fail", prinode)
1219 2b3b6ddd Guido Trotter
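  # Worked example with made-up numbers: a node reporting 'mfree' = 3072
  # that is secondary for two auto-balanced instances of BE_MEMORY = 2048
  # sharing the same primary gets needed_mem = 4096 and triggers the
  # ENODEN1 error above; with only one such instance (needed_mem = 2048)
  # the check passes.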
1220 a8083063 Iustin Pop
  def CheckPrereq(self):
1221 a8083063 Iustin Pop
    """Check prerequisites.
1222 a8083063 Iustin Pop

1223 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
1224 e54c4c5e Guido Trotter
    all its members are valid.
1225 a8083063 Iustin Pop

1226 a8083063 Iustin Pop
    """
1227 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
1228 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
1229 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid checks to be skipped specified",
1230 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
1231 a8083063 Iustin Pop
1232 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
1233 d8fff41c Guido Trotter
    """Build hooks env.
1234 d8fff41c Guido Trotter

1235 5bbd3f7f Michael Hanselmann
    Cluster-Verify hooks just ran in the post phase and their failure makes
1236 d8fff41c Guido Trotter
    the output be logged in the verify output and the verification to fail.
1237 d8fff41c Guido Trotter

1238 d8fff41c Guido Trotter
    """
1239 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
1240 35e994e9 Iustin Pop
    env = {
1241 35e994e9 Iustin Pop
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
1242 35e994e9 Iustin Pop
      }
1243 35e994e9 Iustin Pop
    for node in self.cfg.GetAllNodesInfo().values():
1244 35e994e9 Iustin Pop
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
1245 35e994e9 Iustin Pop
1246 d8fff41c Guido Trotter
    return env, [], all_nodes
1247 d8fff41c Guido Trotter
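  # Sketch of the resulting env with made-up tag values:
  #   {"CLUSTER_TAGS": "prod critical",
  #    "NODE_TAGS_node1.example.com": "rack1"}
  # i.e. one NODE_TAGS_<name> entry per known node holding its
  # space-joined tags; the post-phase hooks (third return value) run on
  # every node.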
1248 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1249 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
1250 a8083063 Iustin Pop

1251 a8083063 Iustin Pop
    """
1252 a0c9776a Iustin Pop
    self.bad = False
1253 a0c9776a Iustin Pop
    _ErrorIf = self._ErrorIf
1254 7c874ee1 Iustin Pop
    verbose = self.op.verbose
1255 7c874ee1 Iustin Pop
    self._feedback_fn = feedback_fn
1256 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
1257 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
1258 a0c9776a Iustin Pop
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
1259 a8083063 Iustin Pop
1260 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
1261 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
1262 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
1263 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
1264 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
1265 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
1266 6d2e83d5 Iustin Pop
                        for iname in instancelist)
1267 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
1268 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
1269 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
1270 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
1271 a8083063 Iustin Pop
    node_volume = {}
1272 a8083063 Iustin Pop
    node_instance = {}
1273 9c9c7d30 Guido Trotter
    node_info = {}
1274 26b6af5e Guido Trotter
    instance_cfg = {}
1275 a8083063 Iustin Pop
1276 a8083063 Iustin Pop
    # FIXME: verify OS list
1277 a8083063 Iustin Pop
    # do local checksums
1278 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
1279 112f18a5 Iustin Pop
1280 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
1281 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
1282 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
1283 112f18a5 Iustin Pop
    file_names.extend(master_files)
1284 112f18a5 Iustin Pop
1285 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
1286 a8083063 Iustin Pop
1287 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1288 a8083063 Iustin Pop
    node_verify_param = {
1289 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
1290 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
1291 82e37788 Iustin Pop
                              if not node.offline],
1292 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1293 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1294 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1295 82e37788 Iustin Pop
                                 if not node.offline],
1296 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1297 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1298 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1299 7c0aa8e9 Iustin Pop
      constants.NV_NODESETUP: None,
1300 a8083063 Iustin Pop
      }
1301 cc9e1230 Guido Trotter
    if vg_name is not None:
1302 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1303 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1304 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
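    # all the checks requested above travel to the nodes in a single
    # call_node_verify RPC; the answers come back in all_nvinfo keyed by
    # node name and are unpacked in the per-node loop below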
1305 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1306 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1307 a8083063 Iustin Pop
1308 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1309 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1310 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1311 6d2e83d5 Iustin Pop
1312 7c874ee1 Iustin Pop
    feedback_fn("* Verifying node status")
1313 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1314 112f18a5 Iustin Pop
      node = node_i.name
1315 25361b9a Iustin Pop
1316 0a66c968 Iustin Pop
      if node_i.offline:
1317 7c874ee1 Iustin Pop
        if verbose:
1318 7c874ee1 Iustin Pop
          feedback_fn("* Skipping offline node %s" % (node,))
1319 0a66c968 Iustin Pop
        n_offline.append(node)
1320 0a66c968 Iustin Pop
        continue
1321 0a66c968 Iustin Pop
1322 112f18a5 Iustin Pop
      if node == master_node:
1323 25361b9a Iustin Pop
        ntype = "master"
1324 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1325 25361b9a Iustin Pop
        ntype = "master candidate"
1326 22f0f71d Iustin Pop
      elif node_i.drained:
1327 22f0f71d Iustin Pop
        ntype = "drained"
1328 22f0f71d Iustin Pop
        n_drained.append(node)
1329 112f18a5 Iustin Pop
      else:
1330 25361b9a Iustin Pop
        ntype = "regular"
1331 7c874ee1 Iustin Pop
      if verbose:
1332 7c874ee1 Iustin Pop
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1333 25361b9a Iustin Pop
1334 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1335 a0c9776a Iustin Pop
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
1336 6f68a739 Iustin Pop
      if msg:
1337 25361b9a Iustin Pop
        continue
1338 25361b9a Iustin Pop
1339 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1340 6d2e83d5 Iustin Pop
      node_drbd = {}
1341 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1342 a0c9776a Iustin Pop
        test = instance not in instanceinfo
1343 a0c9776a Iustin Pop
        _ErrorIf(test, self.ECLUSTERCFG, None,
1344 a0c9776a Iustin Pop
                 "ghost instance '%s' in temporary DRBD map", instance)
1345 c614e5fb Iustin Pop
        # ghost instance should not be running, but otherwise we
1346 c614e5fb Iustin Pop
        # don't give double warnings (both ghost instance and
1347 c614e5fb Iustin Pop
        # unallocated minor in use)
1348 a0c9776a Iustin Pop
        if test:
1349 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1350 c614e5fb Iustin Pop
        else:
1351 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1352 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
1353 a0c9776a Iustin Pop
      self._VerifyNode(node_i, file_names, local_checksums,
1354 a0c9776a Iustin Pop
                       nresult, master_files, node_drbd, vg_name)
1355 a8083063 Iustin Pop
1356 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1357 cc9e1230 Guido Trotter
      if vg_name is None:
1358 cc9e1230 Guido Trotter
        node_volume[node] = {}
1359 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1360 a0c9776a Iustin Pop
        _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1361 a0c9776a Iustin Pop
                 utils.SafeEncode(lvdata))
1362 b63ed789 Iustin Pop
        node_volume[node] = {}
1363 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1364 a0c9776a Iustin Pop
        _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1365 a8083063 Iustin Pop
        continue
1366 b63ed789 Iustin Pop
      else:
1367 25361b9a Iustin Pop
        node_volume[node] = lvdata
1368 a8083063 Iustin Pop
1369 a8083063 Iustin Pop
      # node_instance
1370 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1371 a0c9776a Iustin Pop
      test = not isinstance(idata, list)
1372 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODEHV, node,
1373 a0c9776a Iustin Pop
               "rpc call to node failed (instancelist)")
1374 a0c9776a Iustin Pop
      if test:
1375 a8083063 Iustin Pop
        continue
1376 a8083063 Iustin Pop
1377 25361b9a Iustin Pop
      node_instance[node] = idata
1378 a8083063 Iustin Pop
1379 9c9c7d30 Guido Trotter
      # node_info
1380 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1381 a0c9776a Iustin Pop
      test = not isinstance(nodeinfo, dict)
1382 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1383 a0c9776a Iustin Pop
      if test:
1384 9c9c7d30 Guido Trotter
        continue
1385 9c9c7d30 Guido Trotter
1386 9c9c7d30 Guido Trotter
      try:
1387 9c9c7d30 Guido Trotter
        node_info[node] = {
1388 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1389 93e4c50b Guido Trotter
          "pinst": [],
1390 93e4c50b Guido Trotter
          "sinst": [],
1391 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1392 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1393 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1394 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1395 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1396 36e7da50 Guido Trotter
          # secondary.
1397 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1398 9c9c7d30 Guido Trotter
        }
1399 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1400 cc9e1230 Guido Trotter
        if vg_name is not None:
1401 a0c9776a Iustin Pop
          test = (constants.NV_VGLIST not in nresult or
1402 a0c9776a Iustin Pop
                  vg_name not in nresult[constants.NV_VGLIST])
1403 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODELVM, node,
1404 a0c9776a Iustin Pop
                   "node didn't return data for the volume group '%s'"
1405 a0c9776a Iustin Pop
                   " - it is either missing or broken", vg_name)
1406 a0c9776a Iustin Pop
          if test:
1407 9a198532 Iustin Pop
            continue
1408 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1409 9a198532 Iustin Pop
      except (ValueError, KeyError):
1410 a0c9776a Iustin Pop
        _ErrorIf(True, self.ENODERPC, node,
1411 a0c9776a Iustin Pop
                 "node returned invalid nodeinfo, check lvm/hypervisor")
1412 9c9c7d30 Guido Trotter
        continue
1413 9c9c7d30 Guido Trotter
1414 a8083063 Iustin Pop
    node_vol_should = {}
1415 a8083063 Iustin Pop
1416 7c874ee1 Iustin Pop
    feedback_fn("* Verifying instance status")
1417 a8083063 Iustin Pop
    for instance in instancelist:
1418 7c874ee1 Iustin Pop
      if verbose:
1419 7c874ee1 Iustin Pop
        feedback_fn("* Verifying instance %s" % instance)
1420 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1421 a0c9776a Iustin Pop
      self._VerifyInstance(instance, inst_config, node_volume,
1422 a0c9776a Iustin Pop
                           node_instance, n_offline)
1423 832261fd Iustin Pop
      inst_nodes_offline = []
1424 a8083063 Iustin Pop
1425 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1426 a8083063 Iustin Pop
1427 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1428 26b6af5e Guido Trotter
1429 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1430 a0c9776a Iustin Pop
      _ErrorIf(pnode not in node_info and pnode not in n_offline,
1431 a0c9776a Iustin Pop
               self.ENODERPC, pnode, "instance %s, connection to"
1432 a0c9776a Iustin Pop
               " primary node failed", instance)
1433 93e4c50b Guido Trotter
      if pnode in node_info:
1434 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1435 93e4c50b Guido Trotter
1436 832261fd Iustin Pop
      if pnode in n_offline:
1437 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1438 832261fd Iustin Pop
1439 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1440 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1441 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1442 93e4c50b Guido Trotter
      # supported either.
1443 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1444 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1445 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1446 a0c9776a Iustin Pop
      _ErrorIf(len(inst_config.secondary_nodes) > 1,
1447 a0c9776a Iustin Pop
               self.EINSTANCELAYOUT, instance,
1448 a0c9776a Iustin Pop
               "instance has multiple secondary nodes", code="WARNING")
1449 93e4c50b Guido Trotter
1450 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1451 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1452 3924700f Iustin Pop
1453 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1454 a0c9776a Iustin Pop
        _ErrorIf(snode not in node_info and snode not in n_offline,
1455 a0c9776a Iustin Pop
                 self.ENODERPC, snode,
1456 a0c9776a Iustin Pop
                 "instance %s, connection to secondary node"
1457 a0c9776a Iustin Pop
                 "failed", instance)
1458 a0c9776a Iustin Pop
1459 93e4c50b Guido Trotter
        if snode in node_info:
1460 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1461 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1462 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1463 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1464 a0c9776a Iustin Pop
1465 832261fd Iustin Pop
        if snode in n_offline:
1466 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1467 832261fd Iustin Pop
1468 a0c9776a Iustin Pop
      # warn that the instance lives on offline nodes
1469 a0c9776a Iustin Pop
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
1470 a0c9776a Iustin Pop
               "instance lives on offline node(s) %s",
1471 a0c9776a Iustin Pop
               ", ".join(inst_nodes_offline))
1472 93e4c50b Guido Trotter
1473 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1474 a0c9776a Iustin Pop
    self._VerifyOrphanVolumes(node_vol_should, node_volume)
1475 a8083063 Iustin Pop
1476 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1477 a0c9776a Iustin Pop
    self._VerifyOrphanInstances(instancelist, node_instance)
1478 a8083063 Iustin Pop
1479 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1480 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1481 a0c9776a Iustin Pop
      self._VerifyNPlusOneMemory(node_info, instance_cfg)
1482 2b3b6ddd Guido Trotter
1483 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1484 2b3b6ddd Guido Trotter
    if i_non_redundant:
1485 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1486 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1487 2b3b6ddd Guido Trotter
1488 3924700f Iustin Pop
    if i_non_a_balanced:
1489 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1490 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1491 3924700f Iustin Pop
1492 0a66c968 Iustin Pop
    if n_offline:
1493 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1494 0a66c968 Iustin Pop
1495 22f0f71d Iustin Pop
    if n_drained:
1496 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1497 22f0f71d Iustin Pop
1498 a0c9776a Iustin Pop
    return not self.bad
1499 a8083063 Iustin Pop
1500 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1501 5bbd3f7f Michael Hanselmann
    """Analyze the post-hooks' result
1502 e4376078 Iustin Pop

1503 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
1504 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
1505 d8fff41c Guido Trotter

1506 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1507 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1508 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
1509 e4376078 Iustin Pop
    @param feedback_fn: function used to send feedback back to the caller
1510 e4376078 Iustin Pop
    @param lu_result: previous Exec result
1511 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
1512 e4376078 Iustin Pop
        and hook results
1513 d8fff41c Guido Trotter

1514 d8fff41c Guido Trotter
    """
1515 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
1516 38206f3c Iustin Pop
    # their results
1517 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
1518 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
1519 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
1520 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
1521 7c874ee1 Iustin Pop
      assert hooks_results, "invalid result from hooks"
1522 7c874ee1 Iustin Pop
1523 7c874ee1 Iustin Pop
      for node_name in hooks_results:
1524 7c874ee1 Iustin Pop
        show_node_header = True
1525 7c874ee1 Iustin Pop
        res = hooks_results[node_name]
1526 7c874ee1 Iustin Pop
        msg = res.fail_msg
1527 a0c9776a Iustin Pop
        test = msg and not res.offline
1528 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
1529 7c874ee1 Iustin Pop
                      "Communication failure in hooks execution: %s", msg)
1530 a0c9776a Iustin Pop
        if test:
1531 a0c9776a Iustin Pop
          # override manually lu_result here as _ErrorIf only
1532 a0c9776a Iustin Pop
          # overrides self.bad
1533 7c874ee1 Iustin Pop
          lu_result = 1
1534 7c874ee1 Iustin Pop
          continue
1535 7c874ee1 Iustin Pop
        for script, hkr, output in res.payload:
1536 a0c9776a Iustin Pop
          test = hkr == constants.HKR_FAIL
1537 a0c9776a Iustin Pop
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
1538 7c874ee1 Iustin Pop
                        "Script %s failed, output:", script)
1539 a0c9776a Iustin Pop
          if test:
1540 7c874ee1 Iustin Pop
            output = indent_re.sub('      ', output)
1541 7c874ee1 Iustin Pop
            feedback_fn("%s" % output)
1542 7c874ee1 Iustin Pop
            lu_result = 1
1543 d8fff41c Guido Trotter
1544 d8fff41c Guido Trotter
      return lu_result
1545 d8fff41c Guido Trotter
1546 a8083063 Iustin Pop
1547 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1548 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1549 2c95a8d4 Iustin Pop

1550 2c95a8d4 Iustin Pop
  """
1551 2c95a8d4 Iustin Pop
  _OP_REQP = []
1552 d4b9d97f Guido Trotter
  REQ_BGL = False
1553 d4b9d97f Guido Trotter
1554 d4b9d97f Guido Trotter
  def ExpandNames(self):
1555 d4b9d97f Guido Trotter
    self.needed_locks = {
1556 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1557 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1558 d4b9d97f Guido Trotter
    }
1559 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1560 2c95a8d4 Iustin Pop
1561 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1562 2c95a8d4 Iustin Pop
    """Check prerequisites.
1563 2c95a8d4 Iustin Pop

1564 2c95a8d4 Iustin Pop
    This has no prerequisites.
1565 2c95a8d4 Iustin Pop

1566 2c95a8d4 Iustin Pop
    """
1567 2c95a8d4 Iustin Pop
    pass
1568 2c95a8d4 Iustin Pop
1569 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1570 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1571 2c95a8d4 Iustin Pop

1572 29d376ec Iustin Pop
    @rtype: tuple of three items
1573 29d376ec Iustin Pop
    @return: a tuple of (dict of node-to-node_error, list of instances
1574 29d376ec Iustin Pop
        which need activate-disks, dict of instance: (node, volume) for
1575 29d376ec Iustin Pop
        missing volumes)
1576 29d376ec Iustin Pop

1577 2c95a8d4 Iustin Pop
    """
1578 29d376ec Iustin Pop
    result = res_nodes, res_instances, res_missing = {}, [], {}
1579 2c95a8d4 Iustin Pop
1580 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1581 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1582 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1583 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1584 2c95a8d4 Iustin Pop
1585 2c95a8d4 Iustin Pop
    nv_dict = {}
1586 2c95a8d4 Iustin Pop
    for inst in instances:
1587 2c95a8d4 Iustin Pop
      inst_lvs = {}
1588 0d68c45d Iustin Pop
      if (not inst.admin_up or
1589 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1590 2c95a8d4 Iustin Pop
        continue
1591 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1592 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1593 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1594 2c95a8d4 Iustin Pop
        for vol in vol_list:
1595 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1596 2c95a8d4 Iustin Pop
1597 2c95a8d4 Iustin Pop
    if not nv_dict:
1598 2c95a8d4 Iustin Pop
      return result
1599 2c95a8d4 Iustin Pop
1600 b2a6ccd4 Iustin Pop
    node_lvs = self.rpc.call_lv_list(nodes, vg_name)
1601 2c95a8d4 Iustin Pop
1602 2c95a8d4 Iustin Pop
    for node in nodes:
1603 2c95a8d4 Iustin Pop
      # node_volume
1604 29d376ec Iustin Pop
      node_res = node_lvs[node]
1605 29d376ec Iustin Pop
      if node_res.offline:
1606 ea9ddc07 Iustin Pop
        continue
1607 4c4e4e1e Iustin Pop
      msg = node_res.fail_msg
1608 29d376ec Iustin Pop
      if msg:
1609 29d376ec Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
1610 29d376ec Iustin Pop
        res_nodes[node] = msg
1611 2c95a8d4 Iustin Pop
        continue
1612 2c95a8d4 Iustin Pop
1613 29d376ec Iustin Pop
      lvs = node_res.payload
1614 29d376ec Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.items():
1615 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1616 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1617 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1618 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1619 2c95a8d4 Iustin Pop
1620 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1621 b63ed789 Iustin Pop
    # data better
1622 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1623 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1624 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1625 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1626 b63ed789 Iustin Pop
1627 2c95a8d4 Iustin Pop
    return result
1628 2c95a8d4 Iustin Pop
1629 2c95a8d4 Iustin Pop
1630 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
1631 60975797 Iustin Pop
  """Verifies the cluster disks sizes.
1632 60975797 Iustin Pop

1633 60975797 Iustin Pop
  """
1634 60975797 Iustin Pop
  _OP_REQP = ["instances"]
1635 60975797 Iustin Pop
  REQ_BGL = False
1636 60975797 Iustin Pop
1637 60975797 Iustin Pop
  def ExpandNames(self):
1638 60975797 Iustin Pop
    if not isinstance(self.op.instances, list):
1639 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
1640 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
1641 60975797 Iustin Pop
1642 60975797 Iustin Pop
    if self.op.instances:
1643 60975797 Iustin Pop
      self.wanted_names = []
1644 60975797 Iustin Pop
      for name in self.op.instances:
1645 60975797 Iustin Pop
        full_name = self.cfg.ExpandInstanceName(name)
1646 60975797 Iustin Pop
        if full_name is None:
1647 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Instance '%s' not known" % name,
1648 5c983ee5 Iustin Pop
                                     errors.ECODE_NOENT)
1649 60975797 Iustin Pop
        self.wanted_names.append(full_name)
1650 60975797 Iustin Pop
      self.needed_locks = {
1651 60975797 Iustin Pop
        locking.LEVEL_NODE: [],
1652 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: self.wanted_names,
1653 60975797 Iustin Pop
        }
1654 60975797 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1655 60975797 Iustin Pop
    else:
1656 60975797 Iustin Pop
      self.wanted_names = None
1657 60975797 Iustin Pop
      self.needed_locks = {
1658 60975797 Iustin Pop
        locking.LEVEL_NODE: locking.ALL_SET,
1659 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: locking.ALL_SET,
1660 60975797 Iustin Pop
        }
1661 60975797 Iustin Pop
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1662 60975797 Iustin Pop
1663 60975797 Iustin Pop
  def DeclareLocks(self, level):
1664 60975797 Iustin Pop
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
1665 60975797 Iustin Pop
      self._LockInstancesNodes(primary_only=True)
1666 60975797 Iustin Pop
1667 60975797 Iustin Pop
  def CheckPrereq(self):
1668 60975797 Iustin Pop
    """Check prerequisites.
1669 60975797 Iustin Pop

1670 60975797 Iustin Pop
    This only checks the optional instance list against the existing names.
1671 60975797 Iustin Pop

1672 60975797 Iustin Pop
    """
1673 60975797 Iustin Pop
    if self.wanted_names is None:
1674 60975797 Iustin Pop
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
1675 60975797 Iustin Pop
1676 60975797 Iustin Pop
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
1677 60975797 Iustin Pop
                             in self.wanted_names]
1678 60975797 Iustin Pop
1679 b775c337 Iustin Pop
  def _EnsureChildSizes(self, disk):
1680 b775c337 Iustin Pop
    """Ensure children of the disk have the needed disk size.
1681 b775c337 Iustin Pop

1682 b775c337 Iustin Pop
    This is valid mainly for DRBD8 and fixes an issue where the
1683 b775c337 Iustin Pop
    children have smaller disk size.
1684 b775c337 Iustin Pop

1685 b775c337 Iustin Pop
    @param disk: an L{ganeti.objects.Disk} object
1686 b775c337 Iustin Pop

1687 b775c337 Iustin Pop
    """
1688 b775c337 Iustin Pop
    if disk.dev_type == constants.LD_DRBD8:
1689 b775c337 Iustin Pop
      assert disk.children, "Empty children for DRBD8?"
1690 b775c337 Iustin Pop
      fchild = disk.children[0]
1691 b775c337 Iustin Pop
      mismatch = fchild.size < disk.size
1692 b775c337 Iustin Pop
      if mismatch:
1693 b775c337 Iustin Pop
        self.LogInfo("Child disk has size %d, parent %d, fixing",
1694 b775c337 Iustin Pop
                     fchild.size, disk.size)
1695 b775c337 Iustin Pop
        fchild.size = disk.size
1696 b775c337 Iustin Pop
1697 b775c337 Iustin Pop
      # and we recurse on this child only, not on the metadev
1698 b775c337 Iustin Pop
      return self._EnsureChildSizes(fchild) or mismatch
1699 b775c337 Iustin Pop
    else:
1700 b775c337 Iustin Pop
      return False
1701 b775c337 Iustin Pop
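  # Example with made-up sizes: a DRBD8 disk of size 10240 whose data
  # child reports 10176 gets the child bumped to 10240 and the call
  # returns True, which makes Exec below save the updated configuration.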
1702 60975797 Iustin Pop
  def Exec(self, feedback_fn):
1703 60975797 Iustin Pop
    """Verify the size of cluster disks.
1704 60975797 Iustin Pop

1705 60975797 Iustin Pop
    """
1706 60975797 Iustin Pop
    # TODO: check child disks too
1707 60975797 Iustin Pop
    # TODO: check differences in size between primary/secondary nodes
1708 60975797 Iustin Pop
    per_node_disks = {}
1709 60975797 Iustin Pop
    for instance in self.wanted_instances:
1710 60975797 Iustin Pop
      pnode = instance.primary_node
1711 60975797 Iustin Pop
      if pnode not in per_node_disks:
1712 60975797 Iustin Pop
        per_node_disks[pnode] = []
1713 60975797 Iustin Pop
      for idx, disk in enumerate(instance.disks):
1714 60975797 Iustin Pop
        per_node_disks[pnode].append((instance, idx, disk))
1715 60975797 Iustin Pop
1716 60975797 Iustin Pop
    changed = []
1717 60975797 Iustin Pop
    for node, dskl in per_node_disks.items():
1718 4d9e6835 Iustin Pop
      newl = [v[2].Copy() for v in dskl]
1719 4d9e6835 Iustin Pop
      for dsk in newl:
1720 4d9e6835 Iustin Pop
        self.cfg.SetDiskID(dsk, node)
1721 4d9e6835 Iustin Pop
      result = self.rpc.call_blockdev_getsizes(node, newl)
1722 3cebe102 Michael Hanselmann
      if result.fail_msg:
1723 60975797 Iustin Pop
        self.LogWarning("Failure in blockdev_getsizes call to node"
1724 60975797 Iustin Pop
                        " %s, ignoring", node)
1725 60975797 Iustin Pop
        continue
1726 60975797 Iustin Pop
      if len(result.data) != len(dskl):
1727 60975797 Iustin Pop
        self.LogWarning("Invalid result from node %s, ignoring node results",
1728 60975797 Iustin Pop
                        node)
1729 60975797 Iustin Pop
        continue
1730 60975797 Iustin Pop
      for ((instance, idx, disk), size) in zip(dskl, result.data):
1731 60975797 Iustin Pop
        if size is None:
1732 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return size"
1733 60975797 Iustin Pop
                          " information, ignoring", idx, instance.name)
1734 60975797 Iustin Pop
          continue
1735 60975797 Iustin Pop
        if not isinstance(size, (int, long)):
1736 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return valid"
1737 60975797 Iustin Pop
                          " size information, ignoring", idx, instance.name)
1738 60975797 Iustin Pop
          continue
1739 60975797 Iustin Pop
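        # the node reports the size in bytes while disk.size is recorded
        # in MiB, so convert (shift right by 20 bits) before comparing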
        size = size >> 20
1740 60975797 Iustin Pop
        if size != disk.size:
1741 60975797 Iustin Pop
          self.LogInfo("Disk %d of instance %s has mismatched size,"
1742 60975797 Iustin Pop
                       " correcting: recorded %d, actual %d", idx,
1743 60975797 Iustin Pop
                       instance.name, disk.size, size)
1744 60975797 Iustin Pop
          disk.size = size
1745 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
1746 60975797 Iustin Pop
          changed.append((instance.name, idx, size))
1747 b775c337 Iustin Pop
        if self._EnsureChildSizes(disk):
1748 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
1749 b775c337 Iustin Pop
          changed.append((instance.name, idx, disk.size))
1750 60975797 Iustin Pop
    return changed
1751 60975797 Iustin Pop
1752 60975797 Iustin Pop
1753 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1754 07bd8a51 Iustin Pop
  """Rename the cluster.
1755 07bd8a51 Iustin Pop

1756 07bd8a51 Iustin Pop
  """
1757 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1758 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1759 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1760 07bd8a51 Iustin Pop
1761 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1762 07bd8a51 Iustin Pop
    """Build hooks env.
1763 07bd8a51 Iustin Pop

1764 07bd8a51 Iustin Pop
    """
1765 07bd8a51 Iustin Pop
    env = {
1766 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1767 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1768 07bd8a51 Iustin Pop
      }
1769 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1770 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1771 07bd8a51 Iustin Pop
1772 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1773 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1774 07bd8a51 Iustin Pop

1775 07bd8a51 Iustin Pop
    """
1776 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1777 07bd8a51 Iustin Pop
1778 bcf043c9 Iustin Pop
    new_name = hostname.name
1779 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1780 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
1781 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
1782 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1783 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1784 5c983ee5 Iustin Pop
                                 " cluster has changed",
1785 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
1786 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1787 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1788 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1789 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1790 5c983ee5 Iustin Pop
                                   new_ip, errors.ECODE_NOTUNIQUE)
1791 07bd8a51 Iustin Pop
1792 07bd8a51 Iustin Pop
    self.op.name = new_name
1793 07bd8a51 Iustin Pop
1794 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1795 07bd8a51 Iustin Pop
    """Rename the cluster.
1796 07bd8a51 Iustin Pop

1797 07bd8a51 Iustin Pop
    """
1798 07bd8a51 Iustin Pop
    clustername = self.op.name
1799 07bd8a51 Iustin Pop
    ip = self.ip
1800 07bd8a51 Iustin Pop
1801 07bd8a51 Iustin Pop
    # shutdown the master IP
1802 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
1803 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
1804 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
1805 07bd8a51 Iustin Pop
1806 07bd8a51 Iustin Pop
    try:
1807 55cf7d83 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
1808 55cf7d83 Iustin Pop
      cluster.cluster_name = clustername
1809 55cf7d83 Iustin Pop
      cluster.master_ip = ip
1810 a4eae71f Michael Hanselmann
      self.cfg.Update(cluster, feedback_fn)
1811 ec85e3d5 Iustin Pop
1812 ec85e3d5 Iustin Pop
      # update the known hosts file
1813 ec85e3d5 Iustin Pop
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
1814 ec85e3d5 Iustin Pop
      node_list = self.cfg.GetNodeList()
1815 ec85e3d5 Iustin Pop
      try:
1816 ec85e3d5 Iustin Pop
        node_list.remove(master)
1817 ec85e3d5 Iustin Pop
      except ValueError:
1818 ec85e3d5 Iustin Pop
        pass
1819 ec85e3d5 Iustin Pop
      result = self.rpc.call_upload_file(node_list,
1820 ec85e3d5 Iustin Pop
                                         constants.SSH_KNOWN_HOSTS_FILE)
1821 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
1822 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
1823 6f7d4e75 Iustin Pop
        if msg:
1824 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
1825 6f7d4e75 Iustin Pop
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
1826 6f7d4e75 Iustin Pop
          self.proc.LogWarning(msg)
1827 ec85e3d5 Iustin Pop
1828 07bd8a51 Iustin Pop
    finally:
1829 3583908a Guido Trotter
      result = self.rpc.call_node_start_master(master, False, False)
1830 4c4e4e1e Iustin Pop
      msg = result.fail_msg
1831 b726aff0 Iustin Pop
      if msg:
1832 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
1833 b726aff0 Iustin Pop
                        " the master, please restart manually: %s", msg)
1834 07bd8a51 Iustin Pop
1835 07bd8a51 Iustin Pop
1836 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1837 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1838 8084f9f6 Manuel Franceschini

1839 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
1840 e4376078 Iustin Pop
  @param disk: the disk to check
1841 5bbd3f7f Michael Hanselmann
  @rtype: boolean
1842 e4376078 Iustin Pop
  @return: boolean indicating whether an LD_LV dev_type was found or not
1843 8084f9f6 Manuel Franceschini

1844 8084f9f6 Manuel Franceschini
  """
1845 8084f9f6 Manuel Franceschini
  if disk.children:
1846 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1847 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1848 8084f9f6 Manuel Franceschini
        return True
1849 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
1850 8084f9f6 Manuel Franceschini
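# Usage sketch (illustrative): LUSetClusterParams.CheckPrereq below walks
# every disk of every instance with this helper before allowing lvm
# storage to be disabled; a plain LD_LV disk answers True directly, a
# DRBD8 disk answers True through its LV children.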
1851 8084f9f6 Manuel Franceschini
1852 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1853 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1854 8084f9f6 Manuel Franceschini

1855 8084f9f6 Manuel Franceschini
  """
1856 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1857 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1858 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1859 c53279cf Guido Trotter
  REQ_BGL = False
1860 c53279cf Guido Trotter
1861 3994f455 Iustin Pop
  def CheckArguments(self):
1862 4b7735f9 Iustin Pop
    """Check parameters
1863 4b7735f9 Iustin Pop

1864 4b7735f9 Iustin Pop
    """
1865 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
1866 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
1867 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1868 4b7735f9 Iustin Pop
      try:
1869 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1870 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
1871 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1872 5c983ee5 Iustin Pop
                                   str(err), errors.ECODE_INVAL)
1873 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
1874 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed",
1875 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
1876 4b7735f9 Iustin Pop
1877 c53279cf Guido Trotter
  def ExpandNames(self):
1878 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
1879 c53279cf Guido Trotter
    # all nodes to be modified.
1880 c53279cf Guido Trotter
    self.needed_locks = {
1881 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1882 c53279cf Guido Trotter
    }
1883 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1884 8084f9f6 Manuel Franceschini
1885 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1886 8084f9f6 Manuel Franceschini
    """Build hooks env.
1887 8084f9f6 Manuel Franceschini

1888 8084f9f6 Manuel Franceschini
    """
1889 8084f9f6 Manuel Franceschini
    env = {
1890 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1891 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1892 8084f9f6 Manuel Franceschini
      }
1893 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1894 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1895 8084f9f6 Manuel Franceschini
1896 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1897 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1898 8084f9f6 Manuel Franceschini

1899 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1900 5f83e263 Iustin Pop
    whether the given volume group is valid.
1901 8084f9f6 Manuel Franceschini

1902 8084f9f6 Manuel Franceschini
    """
1903 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
1904 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1905 8084f9f6 Manuel Franceschini
      for inst in instances:
1906 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1907 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1908 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1909 5c983ee5 Iustin Pop
                                       " lvm-based instances exist",
1910 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
1911 8084f9f6 Manuel Franceschini
1912 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1913 779c15bb Iustin Pop
1914 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1915 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1916 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
1917 8084f9f6 Manuel Franceschini
      for node in node_list:
1918 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
1919 e480923b Iustin Pop
        if msg:
1920 781de953 Iustin Pop
          # ignoring down node
1921 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
1922 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
1923 781de953 Iustin Pop
          continue
1924 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
1925 781de953 Iustin Pop
                                              self.op.vg_name,
1926 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1927 8084f9f6 Manuel Franceschini
        if vgstatus:
1928 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1929 5c983ee5 Iustin Pop
                                     (node, vgstatus), errors.ECODE_ENVIRON)
1930 8084f9f6 Manuel Franceschini
1931 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
1932 5af3da74 Guido Trotter
    # validate params changes
1933 779c15bb Iustin Pop
    if self.op.beparams:
1934 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
1935 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
1936 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
1937 779c15bb Iustin Pop
1938 5af3da74 Guido Trotter
    if self.op.nicparams:
1939 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
1940 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
1941 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
1942 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
1943 5af3da74 Guido Trotter
1944 779c15bb Iustin Pop
    # hypervisor list/parameters
1945 abe609b2 Guido Trotter
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
1946 779c15bb Iustin Pop
    if self.op.hvparams:
1947 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
1948 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input",
1949 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
1950 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
1951 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
1952 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
1953 779c15bb Iustin Pop
        else:
1954 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
1955 779c15bb Iustin Pop
1956 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1957 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
1958 b119bccb Guido Trotter
      if not self.hv_list:
1959 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
1960 5c983ee5 Iustin Pop
                                   " least one member",
1961 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
1962 b119bccb Guido Trotter
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
1963 b119bccb Guido Trotter
      if invalid_hvs:
1964 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
1965 5c983ee5 Iustin Pop
                                   " entries: %s" % " ,".join(invalid_hvs),
1966 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
1967 779c15bb Iustin Pop
    else:
1968 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
1969 779c15bb Iustin Pop
1970 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1971 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
1972 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
1973 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1974 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
1975 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
1976 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
1977 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
1978 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1979 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
1980 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
1981 779c15bb Iustin Pop
1982 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1983 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1984 8084f9f6 Manuel Franceschini

1985 8084f9f6 Manuel Franceschini
    """
1986 779c15bb Iustin Pop
    if self.op.vg_name is not None:
1987 b2482333 Guido Trotter
      new_volume = self.op.vg_name
1988 b2482333 Guido Trotter
      if not new_volume:
1989 b2482333 Guido Trotter
        new_volume = None
1990 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
1991 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
1992 779c15bb Iustin Pop
      else:
1993 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
1994 779c15bb Iustin Pop
                    " state, not changing")
1995 779c15bb Iustin Pop
    if self.op.hvparams:
1996 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
1997 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1998 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1999 779c15bb Iustin Pop
    if self.op.beparams:
2000 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2001 5af3da74 Guido Trotter
    if self.op.nicparams:
2002 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2003 5af3da74 Guido Trotter
2004 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
2005 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
2006 75e914fb Iustin Pop
      # we need to update the pool size here, otherwise the save will fail
2007 44485f49 Guido Trotter
      _AdjustCandidatePool(self, [])
2008 4b7735f9 Iustin Pop
2009 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cluster, feedback_fn)
2010 8084f9f6 Manuel Franceschini
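# Illustrative sketch only, not referenced by the code above: how the
# parameter merging done in LUSetClusterParams.CheckPrereq behaves.
# objects.FillDict returns a copy of the defaults updated with the supplied
# overrides; the literal values below are assumptions chosen for demonstration.
def _ExampleMergeBeparams():
  """Merge a partial beparams override into the cluster defaults."""
  defaults = {
    constants.BE_MEMORY: 128,
    constants.BE_VCPUS: 1,
    constants.BE_AUTO_BALANCE: True,
    }
  overrides = {constants.BE_MEMORY: 512}
  merged = objects.FillDict(defaults, overrides)
  # merged keeps the untouched defaults and carries BE_MEMORY=512, which
  # mirrors what CheckPrereq stores as self.new_beparams.
  return merged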
2011 8084f9f6 Manuel Franceschini
2012 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
2013 28eddce5 Guido Trotter
  """Distribute additional files which are part of the cluster configuration.
2014 28eddce5 Guido Trotter

2015 28eddce5 Guido Trotter
  ConfigWriter takes care of distributing the config and ssconf files, but
2016 28eddce5 Guido Trotter
  there are more files which should be distributed to all nodes. This function
2017 28eddce5 Guido Trotter
  makes sure those are copied.
2018 28eddce5 Guido Trotter

2019 28eddce5 Guido Trotter
  @param lu: calling logical unit
2020 28eddce5 Guido Trotter
  @param additional_nodes: list of nodes not in the config to distribute to
2021 28eddce5 Guido Trotter

2022 28eddce5 Guido Trotter
  """
2023 28eddce5 Guido Trotter
  # 1. Gather target nodes
2024 28eddce5 Guido Trotter
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
2025 28eddce5 Guido Trotter
  dist_nodes = lu.cfg.GetNodeList()
2026 28eddce5 Guido Trotter
  if additional_nodes is not None:
2027 28eddce5 Guido Trotter
    dist_nodes.extend(additional_nodes)
2028 28eddce5 Guido Trotter
  if myself.name in dist_nodes:
2029 28eddce5 Guido Trotter
    dist_nodes.remove(myself.name)
2030 a4eae71f Michael Hanselmann
2031 28eddce5 Guido Trotter
  # 2. Gather files to distribute
2032 28eddce5 Guido Trotter
  dist_files = set([constants.ETC_HOSTS,
2033 28eddce5 Guido Trotter
                    constants.SSH_KNOWN_HOSTS_FILE,
2034 28eddce5 Guido Trotter
                    constants.RAPI_CERT_FILE,
2035 28eddce5 Guido Trotter
                    constants.RAPI_USERS_FILE,
2036 4a34c5cf Guido Trotter
                    constants.HMAC_CLUSTER_KEY,
2037 28eddce5 Guido Trotter
                   ])
2038 e1b8653f Guido Trotter
2039 e1b8653f Guido Trotter
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
2040 e1b8653f Guido Trotter
  for hv_name in enabled_hypervisors:
2041 e1b8653f Guido Trotter
    hv_class = hypervisor.GetHypervisor(hv_name)
2042 e1b8653f Guido Trotter
    dist_files.update(hv_class.GetAncillaryFiles())
2043 e1b8653f Guido Trotter
2044 28eddce5 Guido Trotter
  # 3. Perform the files upload
2045 28eddce5 Guido Trotter
  for fname in dist_files:
2046 28eddce5 Guido Trotter
    if os.path.exists(fname):
2047 28eddce5 Guido Trotter
      result = lu.rpc.call_upload_file(dist_nodes, fname)
2048 28eddce5 Guido Trotter
      for to_node, to_result in result.items():
2049 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2050 6f7d4e75 Iustin Pop
        if msg:
2051 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2052 6f7d4e75 Iustin Pop
                 (fname, to_node, msg))
2053 6f7d4e75 Iustin Pop
          lu.proc.LogWarning(msg)
2054 28eddce5 Guido Trotter
2055 28eddce5 Guido Trotter
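# Illustrative sketch only: how a logical unit would typically call
# _RedistributeAncillaryFiles when a node that is not yet in the configuration
# must also receive the files. The 'lu' and 'new_node_name' arguments are
# assumptions for this example; LURedistributeConfig below shows the plain
# no-argument form.
def _ExampleRedistributeWithNewNode(lu, new_node_name):
  """Push the ancillary files, including to a not-yet-configured node."""
  _RedistributeAncillaryFiles(lu, additional_nodes=[new_node_name])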
2056 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
2057 afee0879 Iustin Pop
  """Force the redistribution of cluster configuration.
2058 afee0879 Iustin Pop

2059 afee0879 Iustin Pop
  This is a very simple LU.
2060 afee0879 Iustin Pop

2061 afee0879 Iustin Pop
  """
2062 afee0879 Iustin Pop
  _OP_REQP = []
2063 afee0879 Iustin Pop
  REQ_BGL = False
2064 afee0879 Iustin Pop
2065 afee0879 Iustin Pop
  def ExpandNames(self):
2066 afee0879 Iustin Pop
    self.needed_locks = {
2067 afee0879 Iustin Pop
      locking.LEVEL_NODE: locking.ALL_SET,
2068 afee0879 Iustin Pop
    }
2069 afee0879 Iustin Pop
    self.share_locks[locking.LEVEL_NODE] = 1
2070 afee0879 Iustin Pop
2071 afee0879 Iustin Pop
  def CheckPrereq(self):
2072 afee0879 Iustin Pop
    """Check prerequisites.
2073 afee0879 Iustin Pop

2074 afee0879 Iustin Pop
    """
2075 afee0879 Iustin Pop
2076 afee0879 Iustin Pop
  def Exec(self, feedback_fn):
2077 afee0879 Iustin Pop
    """Redistribute the configuration.
2078 afee0879 Iustin Pop

2079 afee0879 Iustin Pop
    """
2080 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
2081 28eddce5 Guido Trotter
    _RedistributeAncillaryFiles(self)
2082 afee0879 Iustin Pop
2083 afee0879 Iustin Pop
2084 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
2085 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
2086 a8083063 Iustin Pop

2087 a8083063 Iustin Pop
  """
2088 a8083063 Iustin Pop
  if not instance.disks:
2089 a8083063 Iustin Pop
    return True
2090 a8083063 Iustin Pop
2091 a8083063 Iustin Pop
  if not oneshot:
2092 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
2093 a8083063 Iustin Pop
2094 a8083063 Iustin Pop
  node = instance.primary_node
2095 a8083063 Iustin Pop
2096 a8083063 Iustin Pop
  for dev in instance.disks:
2097 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
2098 a8083063 Iustin Pop
2099 a8083063 Iustin Pop
  retries = 0
2100 fbafd7a8 Iustin Pop
  degr_retries = 10 # in seconds, as we sleep 1 second each time
2101 a8083063 Iustin Pop
  while True:
2102 a8083063 Iustin Pop
    max_time = 0
2103 a8083063 Iustin Pop
    done = True
2104 a8083063 Iustin Pop
    cumul_degraded = False
2105 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
2106 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2107 3efa9051 Iustin Pop
    if msg:
2108 3efa9051 Iustin Pop
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
2109 a8083063 Iustin Pop
      retries += 1
2110 a8083063 Iustin Pop
      if retries >= 10:
2111 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
2112 3ecf6786 Iustin Pop
                                 " aborting." % node)
2113 a8083063 Iustin Pop
      time.sleep(6)
2114 a8083063 Iustin Pop
      continue
2115 3efa9051 Iustin Pop
    rstats = rstats.payload
2116 a8083063 Iustin Pop
    retries = 0
2117 1492cca7 Iustin Pop
    for i, mstat in enumerate(rstats):
2118 a8083063 Iustin Pop
      if mstat is None:
2119 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
2120 86d9d3bb Iustin Pop
                           node, instance.disks[i].iv_name)
2121 a8083063 Iustin Pop
        continue
2122 36145b12 Michael Hanselmann
2123 36145b12 Michael Hanselmann
      cumul_degraded = (cumul_degraded or
2124 36145b12 Michael Hanselmann
                        (mstat.is_degraded and mstat.sync_percent is None))
2125 36145b12 Michael Hanselmann
      if mstat.sync_percent is not None:
2126 a8083063 Iustin Pop
        done = False
2127 36145b12 Michael Hanselmann
        if mstat.estimated_time is not None:
2128 36145b12 Michael Hanselmann
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
2129 36145b12 Michael Hanselmann
          max_time = mstat.estimated_time
2130 a8083063 Iustin Pop
        else:
2131 a8083063 Iustin Pop
          rem_time = "no time estimate"
2132 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
2133 4d4a651d Michael Hanselmann
                        (instance.disks[i].iv_name, mstat.sync_percent,
2134 4d4a651d Michael Hanselmann
                         rem_time))
2135 fbafd7a8 Iustin Pop
2136 fbafd7a8 Iustin Pop
    # if we're done but degraded, let's do a few small retries, to
2137 fbafd7a8 Iustin Pop
    # make sure we see a stable and not transient situation; therefore
2138 fbafd7a8 Iustin Pop
    # we force restart of the loop
2139 fbafd7a8 Iustin Pop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
2140 fbafd7a8 Iustin Pop
      logging.info("Degraded disks found, %d retries left", degr_retries)
2141 fbafd7a8 Iustin Pop
      degr_retries -= 1
2142 fbafd7a8 Iustin Pop
      time.sleep(1)
2143 fbafd7a8 Iustin Pop
      continue
2144 fbafd7a8 Iustin Pop
2145 a8083063 Iustin Pop
    if done or oneshot:
2146 a8083063 Iustin Pop
      break
2147 a8083063 Iustin Pop
2148 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
2149 a8083063 Iustin Pop
2150 a8083063 Iustin Pop
  if done:
2151 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
2152 a8083063 Iustin Pop
  return not cumul_degraded
2153 a8083063 Iustin Pop
2154 a8083063 Iustin Pop
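# Illustrative sketch only: the per-mirror decision taken inside the loop of
# _WaitForSync, pulled out for clarity. The attribute names (sync_percent,
# estimated_time, is_degraded) are the ones used above; the plain container
# class is an assumption standing in for the real rstats.payload objects.
class _ExampleMirrorStat(object):
  def __init__(self, sync_percent, estimated_time, is_degraded):
    self.sync_percent = sync_percent
    self.estimated_time = estimated_time
    self.is_degraded = is_degraded


def _ExampleSyncDecision(mstat):
  """Return (still_syncing, degraded_without_progress) for one mirror."""
  still_syncing = mstat.sync_percent is not None
  # A device that reports itself degraded while showing no sync progress is
  # what feeds cumul_degraded above; when the sync otherwise looks finished,
  # _WaitForSync re-checks a few times to rule out a transient state.
  degraded_no_progress = mstat.is_degraded and mstat.sync_percent is None
  return still_syncing, degraded_no_progress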
2155 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
2156 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
2157 a8083063 Iustin Pop

2158 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
2159 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
2160 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
2161 0834c866 Iustin Pop

2162 a8083063 Iustin Pop
  """
2163 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
2164 a8083063 Iustin Pop
2165 a8083063 Iustin Pop
  result = True
2166 96acbc09 Michael Hanselmann
2167 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
2168 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
2169 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2170 23829f6f Iustin Pop
    if msg:
2171 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
2172 23829f6f Iustin Pop
      result = False
2173 23829f6f Iustin Pop
    elif not rstats.payload:
2174 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
2175 a8083063 Iustin Pop
      result = False
2176 a8083063 Iustin Pop
    else:
2177 96acbc09 Michael Hanselmann
      if ldisk:
2178 f208978a Michael Hanselmann
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
2179 96acbc09 Michael Hanselmann
      else:
2180 96acbc09 Michael Hanselmann
        result = result and not rstats.payload.is_degraded
2181 96acbc09 Michael Hanselmann
2182 a8083063 Iustin Pop
  if dev.children:
2183 a8083063 Iustin Pop
    for child in dev.children:
2184 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
2185 a8083063 Iustin Pop
2186 a8083063 Iustin Pop
  return result
2187 a8083063 Iustin Pop
2188 a8083063 Iustin Pop
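# Illustrative sketch only: the two tests _CheckDiskConsistency applies to the
# payload of call_blockdev_find, depending on the ldisk flag. The stand-in
# class is an assumption; the real payload exposes the same two attributes.
class _ExampleBlockDevStatus(object):
  def __init__(self, is_degraded, ldisk_status):
    self.is_degraded = is_degraded
    self.ldisk_status = ldisk_status


def _ExampleConsistencyTest(payload, ldisk):
  """Mirror the ldisk/is_degraded switch used above."""
  if ldisk:
    # ldisk=True: only the local storage status matters
    return payload.ldisk_status == constants.LDS_OKAY
  # ldisk=False: use the overall degradation flag of the device
  return not payload.is_degraded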
2189 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
2190 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
2191 a8083063 Iustin Pop

2192 a8083063 Iustin Pop
  """
2193 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2194 6bf01bbb Guido Trotter
  REQ_BGL = False
2195 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
2196 1e288a26 Guido Trotter
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
2197 1e288a26 Guido Trotter
  # Fields that need calculation of global os validity
2198 1e288a26 Guido Trotter
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])
2199 a8083063 Iustin Pop
2200 6bf01bbb Guido Trotter
  def ExpandNames(self):
2201 1f9430d6 Iustin Pop
    if self.op.names:
2202 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported",
2203 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2204 1f9430d6 Iustin Pop
2205 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2206 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2207 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
2208 1f9430d6 Iustin Pop
2209 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
2210 a6ab004b Iustin Pop
    # Temporary removal of locks, should be reverted later
2211 a6ab004b Iustin Pop
    # TODO: reintroduce locks when they are lighter-weight
2212 6bf01bbb Guido Trotter
    self.needed_locks = {}
2213 a6ab004b Iustin Pop
    #self.share_locks[locking.LEVEL_NODE] = 1
2214 a6ab004b Iustin Pop
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2215 6bf01bbb Guido Trotter
2216 6bf01bbb Guido Trotter
  def CheckPrereq(self):
2217 6bf01bbb Guido Trotter
    """Check prerequisites.
2218 6bf01bbb Guido Trotter

2219 6bf01bbb Guido Trotter
    """
2220 6bf01bbb Guido Trotter
2221 1f9430d6 Iustin Pop
  @staticmethod
2222 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
2223 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
2224 1f9430d6 Iustin Pop

2225 e4376078 Iustin Pop
    @param node_list: a list with the names of all nodes
2226 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
2227 1f9430d6 Iustin Pop

2228 e4376078 Iustin Pop
    @rtype: dict
2229 5fcc718f Iustin Pop
    @return: a dictionary with osnames as keys and, as values, another map with
2230 255dcebd Iustin Pop
        nodes as keys and tuples of (path, status, diagnose, variants) as
        values, eg::
2231 e4376078 Iustin Pop

2232 255dcebd Iustin Pop
          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
2233 255dcebd Iustin Pop
                                     (/srv/..., False, "invalid api")],
2234 255dcebd Iustin Pop
                           "node2": [(/srv/..., True, "")]}
2235 e4376078 Iustin Pop
          }
2236 1f9430d6 Iustin Pop

2237 1f9430d6 Iustin Pop
    """
2238 1f9430d6 Iustin Pop
    all_os = {}
2239 a6ab004b Iustin Pop
    # we build here the list of nodes that didn't fail the RPC (at RPC
2240 a6ab004b Iustin Pop
    # level), so that nodes with a non-responding node daemon don't
2241 a6ab004b Iustin Pop
    # make all OSes invalid
2242 a6ab004b Iustin Pop
    good_nodes = [node_name for node_name in rlist
2243 4c4e4e1e Iustin Pop
                  if not rlist[node_name].fail_msg]
2244 83d92ad8 Iustin Pop
    for node_name, nr in rlist.items():
2245 4c4e4e1e Iustin Pop
      if nr.fail_msg or not nr.payload:
2246 1f9430d6 Iustin Pop
        continue
2247 ba00557a Guido Trotter
      for name, path, status, diagnose, variants in nr.payload:
2248 255dcebd Iustin Pop
        if name not in all_os:
2249 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
2250 1f9430d6 Iustin Pop
          # for each node in node_list
2251 255dcebd Iustin Pop
          all_os[name] = {}
2252 a6ab004b Iustin Pop
          for nname in good_nodes:
2253 255dcebd Iustin Pop
            all_os[name][nname] = []
2254 ba00557a Guido Trotter
        all_os[name][node_name].append((path, status, diagnose, variants))
2255 1f9430d6 Iustin Pop
    return all_os
2256 a8083063 Iustin Pop
2257 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2258 a8083063 Iustin Pop
    """Compute the list of OSes.
2259 a8083063 Iustin Pop

2260 a8083063 Iustin Pop
    """
2261 a6ab004b Iustin Pop
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
2262 94a02bb5 Iustin Pop
    node_data = self.rpc.call_os_diagnose(valid_nodes)
2263 94a02bb5 Iustin Pop
    pol = self._DiagnoseByOS(valid_nodes, node_data)
2264 1f9430d6 Iustin Pop
    output = []
2265 1e288a26 Guido Trotter
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
2266 1e288a26 Guido Trotter
    calc_variants = "variants" in self.op.output_fields
2267 1e288a26 Guido Trotter
2268 83d92ad8 Iustin Pop
    for os_name, os_data in pol.items():
2269 1f9430d6 Iustin Pop
      row = []
2270 1e288a26 Guido Trotter
      if calc_valid:
2271 1e288a26 Guido Trotter
        valid = True
2272 1e288a26 Guido Trotter
        variants = None
2273 1e288a26 Guido Trotter
        for osl in os_data.values():
2274 1e288a26 Guido Trotter
          valid = valid and osl and osl[0][1]
2275 1e288a26 Guido Trotter
          if not valid:
2276 1e288a26 Guido Trotter
            variants = None
2277 1e288a26 Guido Trotter
            break
2278 1e288a26 Guido Trotter
          if calc_variants:
2279 1e288a26 Guido Trotter
            node_variants = osl[0][3]
2280 1e288a26 Guido Trotter
            if variants is None:
2281 1e288a26 Guido Trotter
              variants = node_variants
2282 1e288a26 Guido Trotter
            else:
2283 1e288a26 Guido Trotter
              variants = [v for v in variants if v in node_variants]
2284 1e288a26 Guido Trotter
2285 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
2286 1f9430d6 Iustin Pop
        if field == "name":
2287 1f9430d6 Iustin Pop
          val = os_name
2288 1f9430d6 Iustin Pop
        elif field == "valid":
2289 1e288a26 Guido Trotter
          val = valid
2290 1f9430d6 Iustin Pop
        elif field == "node_status":
2291 255dcebd Iustin Pop
          # this is just a copy of the dict
2292 1f9430d6 Iustin Pop
          val = {}
2293 255dcebd Iustin Pop
          for node_name, nos_list in os_data.items():
2294 255dcebd Iustin Pop
            val[node_name] = nos_list
2295 1e288a26 Guido Trotter
        elif field == "variants":
2296 1e288a26 Guido Trotter
          val = variants
2297 1f9430d6 Iustin Pop
        else:
2298 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
2299 1f9430d6 Iustin Pop
        row.append(val)
2300 1f9430d6 Iustin Pop
      output.append(row)
2301 1f9430d6 Iustin Pop
2302 1f9430d6 Iustin Pop
    return output
2303 a8083063 Iustin Pop
2304 a8083063 Iustin Pop
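# Illustrative sketch only: feeding hand-built RPC results through
# LUDiagnoseOS._DiagnoseByOS to show the per-os, per-node mapping described in
# its docstring. The _FakeOSResult class and the literal OS data are
# assumptions made for demonstration; they only mimic the fail_msg/payload
# attributes the method actually reads.
class _FakeOSResult(object):
  def __init__(self, payload, fail_msg=None):
    self.payload = payload
    self.fail_msg = fail_msg


def _ExampleDiagnoseByOS():
  """Remap a fake two-node OS listing."""
  rlist = {
    "node1": _FakeOSResult([("debian-etch", "/usr/lib/ganeti/os/debian-etch",
                             True, "", [])]),
    "node2": _FakeOSResult(None, fail_msg="node down"),
    }
  # node2 failed at the RPC level, so it is skipped instead of marking every
  # OS invalid; the result maps os name -> node -> [(path, status, diagnose,
  # variants), ...] and here contains only the node1 entry.
  return LUDiagnoseOS._DiagnoseByOS(["node1", "node2"], rlist)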
2305 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
2306 a8083063 Iustin Pop
  """Logical unit for removing a node.
2307 a8083063 Iustin Pop

2308 a8083063 Iustin Pop
  """
2309 a8083063 Iustin Pop
  HPATH = "node-remove"
2310 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2311 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2312 a8083063 Iustin Pop
2313 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2314 a8083063 Iustin Pop
    """Build hooks env.
2315 a8083063 Iustin Pop

2316 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
2317 d08869ee Guido Trotter
    node would then be impossible to remove.
2318 a8083063 Iustin Pop

2319 a8083063 Iustin Pop
    """
2320 396e1b78 Michael Hanselmann
    env = {
2321 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2322 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
2323 396e1b78 Michael Hanselmann
      }
2324 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2325 cd46f3b4 Luca Bigliardi
    if self.op.node_name in all_nodes:
2326 cd46f3b4 Luca Bigliardi
      all_nodes.remove(self.op.node_name)
2327 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
2328 a8083063 Iustin Pop
2329 a8083063 Iustin Pop
  def CheckPrereq(self):
2330 a8083063 Iustin Pop
    """Check prerequisites.
2331 a8083063 Iustin Pop

2332 a8083063 Iustin Pop
    This checks:
2333 a8083063 Iustin Pop
     - the node exists in the configuration
2334 a8083063 Iustin Pop
     - it does not have primary or secondary instances
2335 a8083063 Iustin Pop
     - it's not the master
2336 a8083063 Iustin Pop

2337 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2338 a8083063 Iustin Pop

2339 a8083063 Iustin Pop
    """
2340 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
2341 a8083063 Iustin Pop
    if node is None:
2342 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name,
2343 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
2344 a8083063 Iustin Pop
2345 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2346 a8083063 Iustin Pop
2347 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
2348 a8083063 Iustin Pop
    if node.name == masternode:
2349 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
2350 5c983ee5 Iustin Pop
                                 " you need to failover first.",
2351 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2352 a8083063 Iustin Pop
2353 a8083063 Iustin Pop
    for instance_name in instance_list:
2354 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
2355 6b12959c Iustin Pop
      if node.name in instance.all_nodes:
2356 6b12959c Iustin Pop
        raise errors.OpPrereqError("Instance %s is still running on the node,"
2357 5c983ee5 Iustin Pop
                                   " please remove first." % instance_name,
2358 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2359 a8083063 Iustin Pop
    self.op.node_name = node.name
2360 a8083063 Iustin Pop
    self.node = node
2361 a8083063 Iustin Pop
2362 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2363 a8083063 Iustin Pop
    """Removes the node from the cluster.
2364 a8083063 Iustin Pop

2365 a8083063 Iustin Pop
    """
2366 a8083063 Iustin Pop
    node = self.node
2367 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
2368 9a4f63d1 Iustin Pop
                 node.name)
2369 a8083063 Iustin Pop
2370 b989b9d9 Ken Wehr
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
2371 b989b9d9 Ken Wehr
2372 44485f49 Guido Trotter
    # Promote nodes to master candidate as needed
2373 44485f49 Guido Trotter
    _AdjustCandidatePool(self, exceptions=[node.name])
2374 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
2375 a8083063 Iustin Pop
2376 cd46f3b4 Luca Bigliardi
    # Run post hooks on the node before it's removed
2377 cd46f3b4 Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
2378 cd46f3b4 Luca Bigliardi
    try:
2379 cd46f3b4 Luca Bigliardi
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
2380 3cb5c1e3 Luca Bigliardi
    except:
2381 3cb5c1e3 Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % node.name)
2382 cd46f3b4 Luca Bigliardi
2383 b989b9d9 Ken Wehr
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
2384 4c4e4e1e Iustin Pop
    msg = result.fail_msg
2385 0623d351 Iustin Pop
    if msg:
2386 0623d351 Iustin Pop
      self.LogWarning("Errors encountered on the remote node while leaving"
2387 0623d351 Iustin Pop
                      " the cluster: %s", msg)
2388 c8a0948f Michael Hanselmann
2389 a8083063 Iustin Pop
2390 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
2391 a8083063 Iustin Pop
  """Logical unit for querying nodes.
2392 a8083063 Iustin Pop

2393 a8083063 Iustin Pop
  """
2394 bc8e4a1a Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
2395 35705d8f Guido Trotter
  REQ_BGL = False
2396 19bed813 Iustin Pop
2397 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
2398 19bed813 Iustin Pop
                    "master_candidate", "offline", "drained"]
2399 19bed813 Iustin Pop
2400 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
2401 31bf511f Iustin Pop
    "dtotal", "dfree",
2402 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
2403 31bf511f Iustin Pop
    "bootid",
2404 0105bad3 Iustin Pop
    "ctotal", "cnodes", "csockets",
2405 31bf511f Iustin Pop
    )
2406 31bf511f Iustin Pop
2407 19bed813 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*[
2408 19bed813 Iustin Pop
    "pinst_cnt", "sinst_cnt",
2409 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
2410 31bf511f Iustin Pop
    "pip", "sip", "tags",
2411 0e67cdbe Iustin Pop
    "master",
2412 19bed813 Iustin Pop
    "role"] + _SIMPLE_FIELDS
2413 31bf511f Iustin Pop
    )
2414 a8083063 Iustin Pop
2415 35705d8f Guido Trotter
  def ExpandNames(self):
2416 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2417 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2418 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2419 a8083063 Iustin Pop
2420 35705d8f Guido Trotter
    self.needed_locks = {}
2421 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2422 c8d8b4c8 Iustin Pop
2423 c8d8b4c8 Iustin Pop
    if self.op.names:
2424 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
2425 35705d8f Guido Trotter
    else:
2426 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
2427 c8d8b4c8 Iustin Pop
2428 bc8e4a1a Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2429 bc8e4a1a Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
2430 c8d8b4c8 Iustin Pop
    if self.do_locking:
2431 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
2432 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
2433 c8d8b4c8 Iustin Pop
2434 35705d8f Guido Trotter
  def CheckPrereq(self):
2435 35705d8f Guido Trotter
    """Check prerequisites.
2436 35705d8f Guido Trotter

2437 35705d8f Guido Trotter
    """
2438 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in the _GetWantedNodes,
2439 c8d8b4c8 Iustin Pop
    # if non empty, and if empty, there's no validation to do
2440 c8d8b4c8 Iustin Pop
    pass
2441 a8083063 Iustin Pop
2442 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2443 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2444 a8083063 Iustin Pop

2445 a8083063 Iustin Pop
    """
2446 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
2447 c8d8b4c8 Iustin Pop
    if self.do_locking:
2448 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
2449 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2450 3fa93523 Guido Trotter
      nodenames = self.wanted
2451 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
2452 3fa93523 Guido Trotter
      if missing:
2453 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2454 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
2455 c8d8b4c8 Iustin Pop
    else:
2456 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
2457 c1f1cbb2 Iustin Pop
2458 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
2459 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
2460 a8083063 Iustin Pop
2461 a8083063 Iustin Pop
    # begin data gathering
2462 a8083063 Iustin Pop
2463 bc8e4a1a Iustin Pop
    if self.do_node_query:
2464 a8083063 Iustin Pop
      live_data = {}
2465 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2466 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
2467 a8083063 Iustin Pop
      for name in nodenames:
2468 781de953 Iustin Pop
        nodeinfo = node_data[name]
2469 4c4e4e1e Iustin Pop
        if not nodeinfo.fail_msg and nodeinfo.payload:
2470 070e998b Iustin Pop
          nodeinfo = nodeinfo.payload
2471 d599d686 Iustin Pop
          fn = utils.TryConvert
2472 a8083063 Iustin Pop
          live_data[name] = {
2473 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2474 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2475 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2476 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2477 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2478 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2479 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
2480 0105bad3 Iustin Pop
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2481 0105bad3 Iustin Pop
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2482 a8083063 Iustin Pop
            }
2483 a8083063 Iustin Pop
        else:
2484 a8083063 Iustin Pop
          live_data[name] = {}
2485 a8083063 Iustin Pop
    else:
2486 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
2487 a8083063 Iustin Pop
2488 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
2489 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
2490 a8083063 Iustin Pop
2491 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2492 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
2493 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
2494 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
2495 a8083063 Iustin Pop
2496 ec223efb Iustin Pop
      for instance_name in instancelist:
2497 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
2498 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
2499 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
2500 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
2501 ec223efb Iustin Pop
          if secnode in node_to_secondary:
2502 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
2503 a8083063 Iustin Pop
2504 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
2505 0e67cdbe Iustin Pop
2506 a8083063 Iustin Pop
    # end data gathering
2507 a8083063 Iustin Pop
2508 a8083063 Iustin Pop
    output = []
2509 a8083063 Iustin Pop
    for node in nodelist:
2510 a8083063 Iustin Pop
      node_output = []
2511 a8083063 Iustin Pop
      for field in self.op.output_fields:
2512 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
2513 19bed813 Iustin Pop
          val = getattr(node, field)
2514 ec223efb Iustin Pop
        elif field == "pinst_list":
2515 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
2516 ec223efb Iustin Pop
        elif field == "sinst_list":
2517 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
2518 ec223efb Iustin Pop
        elif field == "pinst_cnt":
2519 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
2520 ec223efb Iustin Pop
        elif field == "sinst_cnt":
2521 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
2522 a8083063 Iustin Pop
        elif field == "pip":
2523 a8083063 Iustin Pop
          val = node.primary_ip
2524 a8083063 Iustin Pop
        elif field == "sip":
2525 a8083063 Iustin Pop
          val = node.secondary_ip
2526 130a6a6f Iustin Pop
        elif field == "tags":
2527 130a6a6f Iustin Pop
          val = list(node.GetTags())
2528 0e67cdbe Iustin Pop
        elif field == "master":
2529 0e67cdbe Iustin Pop
          val = node.name == master_node
2530 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
2531 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
2532 c120ff34 Iustin Pop
        elif field == "role":
2533 c120ff34 Iustin Pop
          if node.name == master_node:
2534 c120ff34 Iustin Pop
            val = "M"
2535 c120ff34 Iustin Pop
          elif node.master_candidate:
2536 c120ff34 Iustin Pop
            val = "C"
2537 c120ff34 Iustin Pop
          elif node.drained:
2538 c120ff34 Iustin Pop
            val = "D"
2539 c120ff34 Iustin Pop
          elif node.offline:
2540 c120ff34 Iustin Pop
            val = "O"
2541 c120ff34 Iustin Pop
          else:
2542 c120ff34 Iustin Pop
            val = "R"
2543 a8083063 Iustin Pop
        else:
2544 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2545 a8083063 Iustin Pop
        node_output.append(val)
2546 a8083063 Iustin Pop
      output.append(node_output)
2547 a8083063 Iustin Pop
2548 a8083063 Iustin Pop
    return output
2549 a8083063 Iustin Pop
2550 a8083063 Iustin Pop
2551 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
2552 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
2553 dcb93971 Michael Hanselmann

2554 dcb93971 Michael Hanselmann
  """
2555 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
2556 21a15682 Guido Trotter
  REQ_BGL = False
2557 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
2558 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
2559 21a15682 Guido Trotter
2560 21a15682 Guido Trotter
  def ExpandNames(self):
2561 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2562 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2563 21a15682 Guido Trotter
                       selected=self.op.output_fields)
2564 21a15682 Guido Trotter
2565 21a15682 Guido Trotter
    self.needed_locks = {}
2566 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2567 21a15682 Guido Trotter
    if not self.op.nodes:
2568 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2569 21a15682 Guido Trotter
    else:
2570 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
2571 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
2572 dcb93971 Michael Hanselmann
2573 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
2574 dcb93971 Michael Hanselmann
    """Check prerequisites.
2575 dcb93971 Michael Hanselmann

2576 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
2577 dcb93971 Michael Hanselmann

2578 dcb93971 Michael Hanselmann
    """
2579 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2580 dcb93971 Michael Hanselmann
2581 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
2582 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
2583 dcb93971 Michael Hanselmann

2584 dcb93971 Michael Hanselmann
    """
2585 a7ba5e53 Iustin Pop
    nodenames = self.nodes
2586 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
2587 dcb93971 Michael Hanselmann
2588 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
2589 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
2590 dcb93971 Michael Hanselmann
2591 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
2592 dcb93971 Michael Hanselmann
2593 dcb93971 Michael Hanselmann
    output = []
2594 dcb93971 Michael Hanselmann
    for node in nodenames:
2595 10bfe6cb Iustin Pop
      nresult = volumes[node]
2596 10bfe6cb Iustin Pop
      if nresult.offline:
2597 10bfe6cb Iustin Pop
        continue
2598 4c4e4e1e Iustin Pop
      msg = nresult.fail_msg
2599 10bfe6cb Iustin Pop
      if msg:
2600 10bfe6cb Iustin Pop
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
2601 37d19eb2 Michael Hanselmann
        continue
2602 37d19eb2 Michael Hanselmann
2603 10bfe6cb Iustin Pop
      node_vols = nresult.payload[:]
2604 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
2605 dcb93971 Michael Hanselmann
2606 dcb93971 Michael Hanselmann
      for vol in node_vols:
2607 dcb93971 Michael Hanselmann
        node_output = []
2608 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
2609 dcb93971 Michael Hanselmann
          if field == "node":
2610 dcb93971 Michael Hanselmann
            val = node
2611 dcb93971 Michael Hanselmann
          elif field == "phys":
2612 dcb93971 Michael Hanselmann
            val = vol['dev']
2613 dcb93971 Michael Hanselmann
          elif field == "vg":
2614 dcb93971 Michael Hanselmann
            val = vol['vg']
2615 dcb93971 Michael Hanselmann
          elif field == "name":
2616 dcb93971 Michael Hanselmann
            val = vol['name']
2617 dcb93971 Michael Hanselmann
          elif field == "size":
2618 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
2619 dcb93971 Michael Hanselmann
          elif field == "instance":
2620 dcb93971 Michael Hanselmann
            for inst in ilist:
2621 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
2622 dcb93971 Michael Hanselmann
                continue
2623 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
2624 dcb93971 Michael Hanselmann
                val = inst.name
2625 dcb93971 Michael Hanselmann
                break
2626 dcb93971 Michael Hanselmann
            else:
2627 dcb93971 Michael Hanselmann
              val = '-'
2628 dcb93971 Michael Hanselmann
          else:
2629 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
2630 dcb93971 Michael Hanselmann
          node_output.append(str(val))
2631 dcb93971 Michael Hanselmann
2632 dcb93971 Michael Hanselmann
        output.append(node_output)
2633 dcb93971 Michael Hanselmann
2634 dcb93971 Michael Hanselmann
    return output
2635 dcb93971 Michael Hanselmann
2636 dcb93971 Michael Hanselmann
2637 9e5442ce Michael Hanselmann
class LUQueryNodeStorage(NoHooksLU):
2638 9e5442ce Michael Hanselmann
  """Logical unit for getting information on storage units on node(s).
2639 9e5442ce Michael Hanselmann

2640 9e5442ce Michael Hanselmann
  """
2641 9e5442ce Michael Hanselmann
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
2642 9e5442ce Michael Hanselmann
  REQ_BGL = False
2643 620a85fd Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
2644 9e5442ce Michael Hanselmann
2645 9e5442ce Michael Hanselmann
  def ExpandNames(self):
2646 9e5442ce Michael Hanselmann
    storage_type = self.op.storage_type
2647 9e5442ce Michael Hanselmann
2648 620a85fd Iustin Pop
    if storage_type not in constants.VALID_STORAGE_TYPES:
2649 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
2650 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2651 9e5442ce Michael Hanselmann
2652 9e5442ce Michael Hanselmann
    _CheckOutputFields(static=self._FIELDS_STATIC,
2653 620a85fd Iustin Pop
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
2654 9e5442ce Michael Hanselmann
                       selected=self.op.output_fields)
2655 9e5442ce Michael Hanselmann
2656 9e5442ce Michael Hanselmann
    self.needed_locks = {}
2657 9e5442ce Michael Hanselmann
    self.share_locks[locking.LEVEL_NODE] = 1
2658 9e5442ce Michael Hanselmann
2659 9e5442ce Michael Hanselmann
    if self.op.nodes:
2660 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = \
2661 9e5442ce Michael Hanselmann
        _GetWantedNodes(self, self.op.nodes)
2662 9e5442ce Michael Hanselmann
    else:
2663 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2664 9e5442ce Michael Hanselmann
2665 9e5442ce Michael Hanselmann
  def CheckPrereq(self):
2666 9e5442ce Michael Hanselmann
    """Check prerequisites.
2667 9e5442ce Michael Hanselmann

2668 9e5442ce Michael Hanselmann
    This checks that the fields required are valid output fields.
2669 9e5442ce Michael Hanselmann

2670 9e5442ce Michael Hanselmann
    """
2671 9e5442ce Michael Hanselmann
    self.op.name = getattr(self.op, "name", None)
2672 9e5442ce Michael Hanselmann
2673 9e5442ce Michael Hanselmann
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2674 9e5442ce Michael Hanselmann
2675 9e5442ce Michael Hanselmann
  def Exec(self, feedback_fn):
2676 9e5442ce Michael Hanselmann
    """Computes the list of nodes and their attributes.
2677 9e5442ce Michael Hanselmann

2678 9e5442ce Michael Hanselmann
    """
2679 9e5442ce Michael Hanselmann
    # Always get name to sort by
2680 9e5442ce Michael Hanselmann
    if constants.SF_NAME in self.op.output_fields:
2681 9e5442ce Michael Hanselmann
      fields = self.op.output_fields[:]
2682 9e5442ce Michael Hanselmann
    else:
2683 9e5442ce Michael Hanselmann
      fields = [constants.SF_NAME] + self.op.output_fields
2684 9e5442ce Michael Hanselmann
2685 620a85fd Iustin Pop
    # Never ask for node or type as it's only known to the LU
2686 620a85fd Iustin Pop
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
2687 620a85fd Iustin Pop
      while extra in fields:
2688 620a85fd Iustin Pop
        fields.remove(extra)
2689 9e5442ce Michael Hanselmann
2690 9e5442ce Michael Hanselmann
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
2691 9e5442ce Michael Hanselmann
    name_idx = field_idx[constants.SF_NAME]
2692 9e5442ce Michael Hanselmann
2693 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
2694 9e5442ce Michael Hanselmann
    data = self.rpc.call_storage_list(self.nodes,
2695 9e5442ce Michael Hanselmann
                                      self.op.storage_type, st_args,
2696 9e5442ce Michael Hanselmann
                                      self.op.name, fields)
2697 9e5442ce Michael Hanselmann
2698 9e5442ce Michael Hanselmann
    result = []
2699 9e5442ce Michael Hanselmann
2700 9e5442ce Michael Hanselmann
    for node in utils.NiceSort(self.nodes):
2701 9e5442ce Michael Hanselmann
      nresult = data[node]
2702 9e5442ce Michael Hanselmann
      if nresult.offline:
2703 9e5442ce Michael Hanselmann
        continue
2704 9e5442ce Michael Hanselmann
2705 9e5442ce Michael Hanselmann
      msg = nresult.fail_msg
2706 9e5442ce Michael Hanselmann
      if msg:
2707 9e5442ce Michael Hanselmann
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
2708 9e5442ce Michael Hanselmann
        continue
2709 9e5442ce Michael Hanselmann
2710 9e5442ce Michael Hanselmann
      rows = dict([(row[name_idx], row) for row in nresult.payload])
2711 9e5442ce Michael Hanselmann
2712 9e5442ce Michael Hanselmann
      for name in utils.NiceSort(rows.keys()):
2713 9e5442ce Michael Hanselmann
        row = rows[name]
2714 9e5442ce Michael Hanselmann
2715 9e5442ce Michael Hanselmann
        out = []
2716 9e5442ce Michael Hanselmann
2717 9e5442ce Michael Hanselmann
        for field in self.op.output_fields:
2718 620a85fd Iustin Pop
          if field == constants.SF_NODE:
2719 9e5442ce Michael Hanselmann
            val = node
2720 620a85fd Iustin Pop
          elif field == constants.SF_TYPE:
2721 620a85fd Iustin Pop
            val = self.op.storage_type
2722 9e5442ce Michael Hanselmann
          elif field in field_idx:
2723 9e5442ce Michael Hanselmann
            val = row[field_idx[field]]
2724 9e5442ce Michael Hanselmann
          else:
2725 9e5442ce Michael Hanselmann
            raise errors.ParameterError(field)
2726 9e5442ce Michael Hanselmann
2727 9e5442ce Michael Hanselmann
          out.append(val)
2728 9e5442ce Michael Hanselmann
2729 9e5442ce Michael Hanselmann
        result.append(out)
2730 9e5442ce Michael Hanselmann
2731 9e5442ce Michael Hanselmann
    return result
2732 9e5442ce Michael Hanselmann
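# Illustrative sketch only: the field-list preparation done in
# LUQueryNodeStorage.Exec before calling call_storage_list. SF_NAME is always
# fetched so the rows can be sorted, while SF_NODE and SF_TYPE never reach the
# backend because only the LU knows their values; the helper name is an
# assumption for this example.
def _ExampleStorageFieldPrep(output_fields):
  """Return (fields_for_rpc, field_idx) for a requested field list."""
  if constants.SF_NAME in output_fields:
    fields = output_fields[:]
  else:
    fields = [constants.SF_NAME] + output_fields
  for extra in [constants.SF_NODE, constants.SF_TYPE]:
    while extra in fields:
      fields.remove(extra)
  field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
  return fields, field_idx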
2733 9e5442ce Michael Hanselmann
2734 efb8da02 Michael Hanselmann
class LUModifyNodeStorage(NoHooksLU):
2735 efb8da02 Michael Hanselmann
  """Logical unit for modifying a storage volume on a node.
2736 efb8da02 Michael Hanselmann

2737 efb8da02 Michael Hanselmann
  """
2738 efb8da02 Michael Hanselmann
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
2739 efb8da02 Michael Hanselmann
  REQ_BGL = False
2740 efb8da02 Michael Hanselmann
2741 efb8da02 Michael Hanselmann
  def CheckArguments(self):
2742 efb8da02 Michael Hanselmann
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2743 efb8da02 Michael Hanselmann
    if node_name is None:
2744 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name,
2745 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
2746 efb8da02 Michael Hanselmann
2747 efb8da02 Michael Hanselmann
    self.op.node_name = node_name
2748 efb8da02 Michael Hanselmann
2749 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
2750 620a85fd Iustin Pop
    if storage_type not in constants.VALID_STORAGE_TYPES:
2751 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
2752 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2753 efb8da02 Michael Hanselmann
2754 efb8da02 Michael Hanselmann
  def ExpandNames(self):
2755 efb8da02 Michael Hanselmann
    self.needed_locks = {
2756 efb8da02 Michael Hanselmann
      locking.LEVEL_NODE: self.op.node_name,
2757 efb8da02 Michael Hanselmann
      }
2758 efb8da02 Michael Hanselmann
2759 efb8da02 Michael Hanselmann
  def CheckPrereq(self):
2760 efb8da02 Michael Hanselmann
    """Check prerequisites.
2761 efb8da02 Michael Hanselmann

2762 efb8da02 Michael Hanselmann
    """
2763 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
2764 efb8da02 Michael Hanselmann
2765 efb8da02 Michael Hanselmann
    try:
2766 efb8da02 Michael Hanselmann
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
2767 efb8da02 Michael Hanselmann
    except KeyError:
2768 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
2769 5c983ee5 Iustin Pop
                                 " modified" % storage_type,
2770 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2771 efb8da02 Michael Hanselmann
2772 efb8da02 Michael Hanselmann
    diff = set(self.op.changes.keys()) - modifiable
2773 efb8da02 Michael Hanselmann
    if diff:
2774 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("The following fields can not be modified for"
2775 efb8da02 Michael Hanselmann
                                 " storage units of type '%s': %r" %
2776 5c983ee5 Iustin Pop
                                 (storage_type, list(diff)),
2777 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2778 efb8da02 Michael Hanselmann
2779 efb8da02 Michael Hanselmann
  def Exec(self, feedback_fn):
2780 efb8da02 Michael Hanselmann
    """Computes the list of nodes and their attributes.
2781 efb8da02 Michael Hanselmann

2782 efb8da02 Michael Hanselmann
    """
2783 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
2784 efb8da02 Michael Hanselmann
    result = self.rpc.call_storage_modify(self.op.node_name,
2785 efb8da02 Michael Hanselmann
                                          self.op.storage_type, st_args,
2786 efb8da02 Michael Hanselmann
                                          self.op.name, self.op.changes)
2787 efb8da02 Michael Hanselmann
    result.Raise("Failed to modify storage unit '%s' on %s" %
2788 efb8da02 Michael Hanselmann
                 (self.op.name, self.op.node_name))
2789 efb8da02 Michael Hanselmann
2790 efb8da02 Michael Hanselmann
2791 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
2792 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
2793 a8083063 Iustin Pop

2794 a8083063 Iustin Pop
  """
2795 a8083063 Iustin Pop
  HPATH = "node-add"
2796 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2797 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2798 a8083063 Iustin Pop
2799 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2800 a8083063 Iustin Pop
    """Build hooks env.
2801 a8083063 Iustin Pop

2802 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
2803 a8083063 Iustin Pop

2804 a8083063 Iustin Pop
    """
2805 a8083063 Iustin Pop
    env = {
2806 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2807 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
2808 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
2809 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
2810 a8083063 Iustin Pop
      }
2811 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
2812 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
2813 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
2814 a8083063 Iustin Pop
2815 a8083063 Iustin Pop
  def CheckPrereq(self):
2816 a8083063 Iustin Pop
    """Check prerequisites.
2817 a8083063 Iustin Pop

2818 a8083063 Iustin Pop
    This checks:
2819 a8083063 Iustin Pop
     - the new node is not already in the config
2820 a8083063 Iustin Pop
     - it is resolvable
2821 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
2822 a8083063 Iustin Pop

2823 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2824 a8083063 Iustin Pop

2825 a8083063 Iustin Pop
    """
2826 a8083063 Iustin Pop
    node_name = self.op.node_name
2827 a8083063 Iustin Pop
    cfg = self.cfg
2828 a8083063 Iustin Pop
2829 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
2830 a8083063 Iustin Pop
2831 bcf043c9 Iustin Pop
    node = dns_data.name
2832 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
2833 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
2834 a8083063 Iustin Pop
    if secondary_ip is None:
2835 a8083063 Iustin Pop
      secondary_ip = primary_ip
2836 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
2837 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given",
2838 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2839 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
2840 e7c6e02b Michael Hanselmann
2841 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
2842 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
2843 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
2844 5c983ee5 Iustin Pop
                                 node, errors.ECODE_EXISTS)
2845 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
2846 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
2847 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
2848 a8083063 Iustin Pop
2849 a8083063 Iustin Pop
    for existing_node_name in node_list:
2850 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
2851 e7c6e02b Michael Hanselmann
2852 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
2853 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
2854 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
2855 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
2856 5c983ee5 Iustin Pop
                                     " address configuration as before",
2857 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
2858 e7c6e02b Michael Hanselmann
        continue
2859 e7c6e02b Michael Hanselmann
2860 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
2861 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
2862 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
2863 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
2864 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
2865 5c983ee5 Iustin Pop
                                   " existing node %s" % existing_node.name,
2866 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
2867 a8083063 Iustin Pop
2868 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
2869 a8083063 Iustin Pop
    # same as for the master
2870 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2871 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
2872 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
2873 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
2874 a8083063 Iustin Pop
      if master_singlehomed:
2875 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
2876 5c983ee5 Iustin Pop
                                   " new node has one",
2877 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2878 a8083063 Iustin Pop
      else:
2879 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
2880 5c983ee5 Iustin Pop
                                   " new node doesn't have one",
2881 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2882 a8083063 Iustin Pop
2883 5bbd3f7f Michael Hanselmann
    # checks reachability
2884 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2885 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping",
2886 5c983ee5 Iustin Pop
                                 errors.ECODE_ENVIRON)
2887 a8083063 Iustin Pop
2888 a8083063 Iustin Pop
    if not newbie_singlehomed:
2889 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
2890 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2891 b15d625f Iustin Pop
                           source=myself.secondary_ip):
2892 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2893 5c983ee5 Iustin Pop
                                   " based ping to noded port",
2894 5c983ee5 Iustin Pop
                                   errors.ECODE_ENVIRON)
2895 a8083063 Iustin Pop
2896 a8ae3eb5 Iustin Pop
    if self.op.readd:
2897 a8ae3eb5 Iustin Pop
      exceptions = [node]
2898 a8ae3eb5 Iustin Pop
    else:
2899 a8ae3eb5 Iustin Pop
      exceptions = []
2900 6d7e1f20 Guido Trotter
2901 6d7e1f20 Guido Trotter
    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
2902 0fff97e9 Guido Trotter
2903 a8ae3eb5 Iustin Pop
    if self.op.readd:
2904 a8ae3eb5 Iustin Pop
      self.new_node = self.cfg.GetNodeInfo(node)
2905 a8ae3eb5 Iustin Pop
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
2906 a8ae3eb5 Iustin Pop
    else:
2907 a8ae3eb5 Iustin Pop
      self.new_node = objects.Node(name=node,
2908 a8ae3eb5 Iustin Pop
                                   primary_ip=primary_ip,
2909 a8ae3eb5 Iustin Pop
                                   secondary_ip=secondary_ip,
2910 a8ae3eb5 Iustin Pop
                                   master_candidate=self.master_candidate,
2911 a8ae3eb5 Iustin Pop
                                   offline=False, drained=False)
2912 a8083063 Iustin Pop
2913 a8083063 Iustin Pop
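  # Illustrative note (not part of the original code): the single- vs
  # dual-homed check above reduces to a simple boolean equality.  A node
  # is "single-homed" when its secondary IP equals its primary IP, and
  # the new node must have the same homing as the master:
  #
  #   master_singlehomed = myself.secondary_ip == myself.primary_ip
  #   newbie_singlehomed = secondary_ip == primary_ip
  #   ok = (master_singlehomed == newbie_singlehomed)
  #
  # e.g. a master with primary 192.0.2.1 and secondary 10.0.0.1 (dual
  # homed) rejects a new node that only supplies a primary IP.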
  def Exec(self, feedback_fn):
2914 a8083063 Iustin Pop
    """Adds the new node to the cluster.
2915 a8083063 Iustin Pop

2916 a8083063 Iustin Pop
    """
2917 a8083063 Iustin Pop
    new_node = self.new_node
2918 a8083063 Iustin Pop
    node = new_node.name
2919 a8083063 Iustin Pop
2920 a8ae3eb5 Iustin Pop
    # for re-adds, reset the offline/drained/master-candidate flags;
2921 a8ae3eb5 Iustin Pop
    # we need to reset here, otherwise offline would prevent RPC calls
2922 a8ae3eb5 Iustin Pop
    # later in the procedure; this also means that if the re-add
2923 a8ae3eb5 Iustin Pop
    # fails, we are left with a non-offlined, broken node
2924 a8ae3eb5 Iustin Pop
    if self.op.readd:
2925 a8ae3eb5 Iustin Pop
      new_node.drained = new_node.offline = False
2926 a8ae3eb5 Iustin Pop
      self.LogInfo("Readding a node, the offline/drained flags were reset")
2927 a8ae3eb5 Iustin Pop
      # if we demote the node, we do cleanup later in the procedure
2928 a8ae3eb5 Iustin Pop
      new_node.master_candidate = self.master_candidate
2929 a8ae3eb5 Iustin Pop
2930 a8ae3eb5 Iustin Pop
    # notify the user about any possible mc promotion
2931 a8ae3eb5 Iustin Pop
    if new_node.master_candidate:
2932 a8ae3eb5 Iustin Pop
      self.LogInfo("Node will be a master candidate")
2933 a8ae3eb5 Iustin Pop
2934 a8083063 Iustin Pop
    # check connectivity
2935 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
2936 4c4e4e1e Iustin Pop
    result.Raise("Can't get version information from node %s" % node)
2937 90b54c26 Iustin Pop
    if constants.PROTOCOL_VERSION == result.payload:
2938 90b54c26 Iustin Pop
      logging.info("Communication to node %s fine, sw version %s match",
2939 90b54c26 Iustin Pop
                   node, result.payload)
2940 a8083063 Iustin Pop
    else:
2941 90b54c26 Iustin Pop
      raise errors.OpExecError("Version mismatch master version %s,"
2942 90b54c26 Iustin Pop
                               " node version %s" %
2943 90b54c26 Iustin Pop
                               (constants.PROTOCOL_VERSION, result.payload))
2944 a8083063 Iustin Pop
2945 a8083063 Iustin Pop
    # setup ssh on node
2946 b989b9d9 Ken Wehr
    if self.cfg.GetClusterInfo().modify_ssh_setup:
2947 b989b9d9 Ken Wehr
      logging.info("Copy ssh key to node %s", node)
2948 b989b9d9 Ken Wehr
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2949 b989b9d9 Ken Wehr
      keyarray = []
2950 b989b9d9 Ken Wehr
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2951 b989b9d9 Ken Wehr
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2952 b989b9d9 Ken Wehr
                  priv_key, pub_key]
2953 b989b9d9 Ken Wehr
2954 b989b9d9 Ken Wehr
      for i in keyfiles:
2955 b989b9d9 Ken Wehr
        keyarray.append(utils.ReadFile(i))
2956 b989b9d9 Ken Wehr
2957 b989b9d9 Ken Wehr
      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2958 b989b9d9 Ken Wehr
                                      keyarray[2], keyarray[3], keyarray[4],
2959 b989b9d9 Ken Wehr
                                      keyarray[5])
2960 b989b9d9 Ken Wehr
      result.Raise("Cannot transfer ssh keys to the new node")
2961 a8083063 Iustin Pop
2962 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
2963 b86a6bcd Guido Trotter
    if self.cfg.GetClusterInfo().modify_etc_hosts:
2964 b86a6bcd Guido Trotter
      utils.AddHostToEtcHosts(new_node.name)
2965 c8a0948f Michael Hanselmann
2966 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
2967 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
2968 781de953 Iustin Pop
                                                 new_node.secondary_ip)
2969 4c4e4e1e Iustin Pop
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
2970 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_ENVIRON)
2971 c2fc8250 Iustin Pop
      if not result.payload:
2972 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2973 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
2974 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
2975 a8083063 Iustin Pop
2976 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
2977 5c0527ed Guido Trotter
    node_verify_param = {
2978 f60759f7 Iustin Pop
      constants.NV_NODELIST: [node],
2979 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
2980 5c0527ed Guido Trotter
    }
2981 5c0527ed Guido Trotter
2982 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2983 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
2984 5c0527ed Guido Trotter
    for verifier in node_verify_list:
2985 4c4e4e1e Iustin Pop
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
2986 f60759f7 Iustin Pop
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
2987 6f68a739 Iustin Pop
      if nl_payload:
2988 6f68a739 Iustin Pop
        for failed in nl_payload:
2989 31821208 Iustin Pop
          feedback_fn("ssh/hostname verification failed"
2990 31821208 Iustin Pop
                      " (checking from %s): %s" %
2991 6f68a739 Iustin Pop
                      (verifier, nl_payload[failed]))
2992 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
2993 ff98055b Iustin Pop
2994 d8470559 Michael Hanselmann
    if self.op.readd:
2995 28eddce5 Guido Trotter
      _RedistributeAncillaryFiles(self)
2996 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
2997 a8ae3eb5 Iustin Pop
      # make sure we redistribute the config
2998 a4eae71f Michael Hanselmann
      self.cfg.Update(new_node, feedback_fn)
2999 a8ae3eb5 Iustin Pop
      # and make sure the new node will not have old files around
3000 a8ae3eb5 Iustin Pop
      if not new_node.master_candidate:
3001 a8ae3eb5 Iustin Pop
        result = self.rpc.call_node_demote_from_mc(new_node.name)
3002 3cebe102 Michael Hanselmann
        msg = result.fail_msg
3003 a8ae3eb5 Iustin Pop
        if msg:
3004 a8ae3eb5 Iustin Pop
          self.LogWarning("Node failed to demote itself from master"
3005 a8ae3eb5 Iustin Pop
                          " candidate status: %s" % msg)
3006 d8470559 Michael Hanselmann
    else:
3007 035566e3 Iustin Pop
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
3008 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
3009 a8083063 Iustin Pop
3010 a8083063 Iustin Pop
3011 b31c8676 Iustin Pop
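# Illustrative sketch (not part of the original code): from a client's
# point of view, node addition and re-addition go through the matching
# opcode; the field names below mirror _OP_REQP and the attributes read
# in CheckPrereq (primary/secondary IPs are resolved there if omitted).
# The exact opcode class lives in ganeti/opcodes.py and is assumed here:
#
#   from ganeti import opcodes
#   op = opcodes.OpAddNode(node_name="node4.example.com",
#                          secondary_ip="192.0.2.44",  # optional
#                          readd=False)
#
# With readd=True the node must already exist in the configuration and
# keep the same IP addresses, as enforced in CheckPrereq above.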
class LUSetNodeParams(LogicalUnit):
3012 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
3013 b31c8676 Iustin Pop

3014 b31c8676 Iustin Pop
  """
3015 b31c8676 Iustin Pop
  HPATH = "node-modify"
3016 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
3017 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
3018 b31c8676 Iustin Pop
  REQ_BGL = False
3019 b31c8676 Iustin Pop
3020 b31c8676 Iustin Pop
  def CheckArguments(self):
3021 b31c8676 Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
3022 b31c8676 Iustin Pop
    if node_name is None:
3023 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name,
3024 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3025 b31c8676 Iustin Pop
    self.op.node_name = node_name
3026 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'master_candidate')
3027 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'offline')
3028 c9d443ea Iustin Pop
    _CheckBooleanOpField(self.op, 'drained')
3029 c9d443ea Iustin Pop
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
3030 c9d443ea Iustin Pop
    if all_mods.count(None) == 3:
3031 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification",
3032 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3033 c9d443ea Iustin Pop
    if all_mods.count(True) > 1:
3034 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Can't set the node into more than one"
3035 5c983ee5 Iustin Pop
                                 " state at the same time",
3036 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3037 b31c8676 Iustin Pop
3038 b31c8676 Iustin Pop
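  # Illustrative note (not part of the original code): the checks above
  # mean that at least one of the three flags must be given and at most
  # one of them may be True, e.g.:
  #
  #   offline=None,  master_candidate=None, drained=None  -> rejected
  #   offline=True,  master_candidate=None, drained=True  -> rejected
  #   offline=True,  master_candidate=None, drained=None  -> accepted
  #   offline=False, master_candidate=True, drained=False -> accepted
  #
  # (clearing several flags at once is fine; only setting two conflicting
  # states at the same time is refused)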
  def ExpandNames(self):
3039 b31c8676 Iustin Pop
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
3040 b31c8676 Iustin Pop
3041 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
3042 b31c8676 Iustin Pop
    """Build hooks env.
3043 b31c8676 Iustin Pop

3044 b31c8676 Iustin Pop
    This runs on the master node.
3045 b31c8676 Iustin Pop

3046 b31c8676 Iustin Pop
    """
3047 b31c8676 Iustin Pop
    env = {
3048 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
3049 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
3050 3a5ba66a Iustin Pop
      "OFFLINE": str(self.op.offline),
3051 c9d443ea Iustin Pop
      "DRAINED": str(self.op.drained),
3052 b31c8676 Iustin Pop
      }
3053 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
3054 b31c8676 Iustin Pop
          self.op.node_name]
3055 b31c8676 Iustin Pop
    return env, nl, nl
3056 b31c8676 Iustin Pop
3057 b31c8676 Iustin Pop
  def CheckPrereq(self):
3058 b31c8676 Iustin Pop
    """Check prerequisites.
3059 b31c8676 Iustin Pop

3060 b31c8676 Iustin Pop
    This only checks the node's current state against the requested flag changes.
3061 b31c8676 Iustin Pop

3062 b31c8676 Iustin Pop
    """
3063 3a5ba66a Iustin Pop
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
3064 b31c8676 Iustin Pop
3065 97c61d46 Iustin Pop
    if (self.op.master_candidate is not None or
3066 97c61d46 Iustin Pop
        self.op.drained is not None or
3067 97c61d46 Iustin Pop
        self.op.offline is not None):
3068 97c61d46 Iustin Pop
      # we can't change the master's node flags
3069 97c61d46 Iustin Pop
      if self.op.node_name == self.cfg.GetMasterNode():
3070 97c61d46 Iustin Pop
        raise errors.OpPrereqError("The master role can be changed"
3071 5c983ee5 Iustin Pop
                                   " only via masterfailover",
3072 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3073 97c61d46 Iustin Pop
3074 8fbf5ac7 Guido Trotter
    # Boolean value that tells us whether we're offlining or draining the node
3075 8fbf5ac7 Guido Trotter
    offline_or_drain = self.op.offline == True or self.op.drained == True
3076 3d9eb52b Guido Trotter
    deoffline_or_drain = self.op.offline == False or self.op.drained == False
3077 8fbf5ac7 Guido Trotter
3078 8fbf5ac7 Guido Trotter
    if (node.master_candidate and
3079 8fbf5ac7 Guido Trotter
        (self.op.master_candidate == False or offline_or_drain)):
3080 3e83dd48 Iustin Pop
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
3081 8fbf5ac7 Guido Trotter
      mc_now, mc_should, mc_max = self.cfg.GetMasterCandidateStats()
3082 8fbf5ac7 Guido Trotter
      if mc_now <= cp_size:
3083 3e83dd48 Iustin Pop
        msg = ("Not enough master candidates (desired"
3084 8fbf5ac7 Guido Trotter
               " %d, new value will be %d)" % (cp_size, mc_now-1))
3085 8fbf5ac7 Guido Trotter
        # Only allow forcing the operation if it's an offline/drain operation,
3086 8fbf5ac7 Guido Trotter
        # and we could not possibly promote more nodes.
3087 8fbf5ac7 Guido Trotter
        # FIXME: this can still lead to issues if in any way another node which
3088 8fbf5ac7 Guido Trotter
        # could be promoted appears in the meantime.
3089 8fbf5ac7 Guido Trotter
        if self.op.force and offline_or_drain and mc_should == mc_max:
3090 3e83dd48 Iustin Pop
          self.LogWarning(msg)
3091 3e83dd48 Iustin Pop
        else:
3092 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3093 3e83dd48 Iustin Pop
3094 c9d443ea Iustin Pop
    if (self.op.master_candidate == True and
3095 c9d443ea Iustin Pop
        ((node.offline and not self.op.offline == False) or
3096 c9d443ea Iustin Pop
         (node.drained and not self.op.drained == False))):
3097 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
3098 5c983ee5 Iustin Pop
                                 " to master_candidate" % node.name,
3099 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3100 3a5ba66a Iustin Pop
3101 3d9eb52b Guido Trotter
    # If we're being deofflined/drained, we'll MC ourself if needed
3102 3d9eb52b Guido Trotter
    if (deoffline_or_drain and not offline_or_drain and not
3103 3d9eb52b Guido Trotter
        self.op.master_candidate == True):
3104 3d9eb52b Guido Trotter
      self.op.master_candidate = _DecideSelfPromotion(self)
3105 3d9eb52b Guido Trotter
      if self.op.master_candidate:
3106 3d9eb52b Guido Trotter
        self.LogInfo("Autopromoting node to master candidate")
3107 3d9eb52b Guido Trotter
3108 b31c8676 Iustin Pop
    return
3109 b31c8676 Iustin Pop
3110 b31c8676 Iustin Pop
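  # Illustrative note (not part of the original code): the demotion check
  # above compares the current number of master candidates with the
  # configured pool size.  For example, with candidate_pool_size = 10 and
  # GetMasterCandidateStats() returning (mc_now, mc_should, mc_max) =
  # (3, 3, 5), demoting one candidate would leave only 2 < 10 candidates,
  # so the request is refused unless force is set for an offline/drain
  # operation and no other node could possibly be promoted
  # (mc_should == mc_max).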
  def Exec(self, feedback_fn):
3111 b31c8676 Iustin Pop
    """Modifies a node.
3112 b31c8676 Iustin Pop

3113 b31c8676 Iustin Pop
    """
3114 3a5ba66a Iustin Pop
    node = self.node
3115 b31c8676 Iustin Pop
3116 b31c8676 Iustin Pop
    result = []
3117 c9d443ea Iustin Pop
    changed_mc = False
3118 b31c8676 Iustin Pop
3119 3a5ba66a Iustin Pop
    if self.op.offline is not None:
3120 3a5ba66a Iustin Pop
      node.offline = self.op.offline
3121 3a5ba66a Iustin Pop
      result.append(("offline", str(self.op.offline)))
3122 c9d443ea Iustin Pop
      if self.op.offline == True:
3123 c9d443ea Iustin Pop
        if node.master_candidate:
3124 c9d443ea Iustin Pop
          node.master_candidate = False
3125 c9d443ea Iustin Pop
          changed_mc = True
3126 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to offline"))
3127 c9d443ea Iustin Pop
        if node.drained:
3128 c9d443ea Iustin Pop
          node.drained = False
3129 c9d443ea Iustin Pop
          result.append(("drained", "clear drained status due to offline"))
3130 3a5ba66a Iustin Pop
3131 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
3132 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
3133 c9d443ea Iustin Pop
      changed_mc = True
3134 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
3135 56aa9fd5 Iustin Pop
      if self.op.master_candidate == False:
3136 56aa9fd5 Iustin Pop
        rrc = self.rpc.call_node_demote_from_mc(node.name)
3137 4c4e4e1e Iustin Pop
        msg = rrc.fail_msg
3138 0959c824 Iustin Pop
        if msg:
3139 0959c824 Iustin Pop
          self.LogWarning("Node failed to demote itself: %s" % msg)
3140 b31c8676 Iustin Pop
3141 c9d443ea Iustin Pop
    if self.op.drained is not None:
3142 c9d443ea Iustin Pop
      node.drained = self.op.drained
3143 82e12743 Iustin Pop
      result.append(("drained", str(self.op.drained)))
3144 c9d443ea Iustin Pop
      if self.op.drained == True:
3145 c9d443ea Iustin Pop
        if node.master_candidate:
3146 c9d443ea Iustin Pop
          node.master_candidate = False
3147 c9d443ea Iustin Pop
          changed_mc = True
3148 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to drain"))
3149 dec0d9da Iustin Pop
          rrc = self.rpc.call_node_demote_from_mc(node.name)
3150 3cebe102 Michael Hanselmann
          msg = rrc.fail_msg
3151 dec0d9da Iustin Pop
          if msg:
3152 dec0d9da Iustin Pop
            self.LogWarning("Node failed to demote itself: %s" % msg)
3153 c9d443ea Iustin Pop
        if node.offline:
3154 c9d443ea Iustin Pop
          node.offline = False
3155 c9d443ea Iustin Pop
          result.append(("offline", "clear offline status due to drain"))
3156 c9d443ea Iustin Pop
3157 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
3158 a4eae71f Michael Hanselmann
    self.cfg.Update(node, feedback_fn)
3159 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
3160 c9d443ea Iustin Pop
    if changed_mc:
3161 3a26773f Iustin Pop
      self.context.ReaddNode(node)
3162 b31c8676 Iustin Pop
3163 b31c8676 Iustin Pop
    return result
3164 b31c8676 Iustin Pop
3165 b31c8676 Iustin Pop
3166 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
3167 f5118ade Iustin Pop
  """Powercycles a node.
3168 f5118ade Iustin Pop

3169 f5118ade Iustin Pop
  """
3170 f5118ade Iustin Pop
  _OP_REQP = ["node_name", "force"]
3171 f5118ade Iustin Pop
  REQ_BGL = False
3172 f5118ade Iustin Pop
3173 f5118ade Iustin Pop
  def CheckArguments(self):
3174 f5118ade Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
3175 f5118ade Iustin Pop
    if node_name is None:
3176 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name,
3177 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
3178 f5118ade Iustin Pop
    self.op.node_name = node_name
3179 f5118ade Iustin Pop
    if node_name == self.cfg.GetMasterNode() and not self.op.force:
3180 f5118ade Iustin Pop
      raise errors.OpPrereqError("The node is the master and the force"
3181 5c983ee5 Iustin Pop
                                 " parameter was not set",
3182 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3183 f5118ade Iustin Pop
3184 f5118ade Iustin Pop
  def ExpandNames(self):
3185 f5118ade Iustin Pop
    """Locking for PowercycleNode.
3186 f5118ade Iustin Pop

3187 efb8da02 Michael Hanselmann
    This is a last-resort option and shouldn't block on other
3188 f5118ade Iustin Pop
    jobs. Therefore, we grab no locks.
3189 f5118ade Iustin Pop

3190 f5118ade Iustin Pop
    """
3191 f5118ade Iustin Pop
    self.needed_locks = {}
3192 f5118ade Iustin Pop
3193 f5118ade Iustin Pop
  def CheckPrereq(self):
3194 f5118ade Iustin Pop
    """Check prerequisites.
3195 f5118ade Iustin Pop

3196 f5118ade Iustin Pop
    This LU has no prereqs.
3197 f5118ade Iustin Pop

3198 f5118ade Iustin Pop
    """
3199 f5118ade Iustin Pop
    pass
3200 f5118ade Iustin Pop
3201 f5118ade Iustin Pop
  def Exec(self, feedback_fn):
3202 f5118ade Iustin Pop
    """Reboots a node.
3203 f5118ade Iustin Pop

3204 f5118ade Iustin Pop
    """
3205 f5118ade Iustin Pop
    result = self.rpc.call_node_powercycle(self.op.node_name,
3206 f5118ade Iustin Pop
                                           self.cfg.GetHypervisorType())
3207 4c4e4e1e Iustin Pop
    result.Raise("Failed to schedule the reboot")
3208 f5118ade Iustin Pop
    return result.payload
3209 f5118ade Iustin Pop
3210 f5118ade Iustin Pop
3211 a8083063 Iustin Pop
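# Illustrative sketch (not part of the original code): LUPowercycleNode
# above deliberately takes no locks and only refuses to powercycle the
# master node unless force is set.  A client-side invocation is assumed
# to look roughly like (opcode class from ganeti/opcodes.py):
#
#   op = opcodes.OpPowercycleNode(node_name="node2.example.com",
#                                 force=False)
#
# The returned payload is whatever call_node_powercycle hands back,
# typically a human-readable message that the reboot was scheduled.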
class LUQueryClusterInfo(NoHooksLU):
3212 a8083063 Iustin Pop
  """Query cluster configuration.
3213 a8083063 Iustin Pop

3214 a8083063 Iustin Pop
  """
3215 a8083063 Iustin Pop
  _OP_REQP = []
3216 642339cf Guido Trotter
  REQ_BGL = False
3217 642339cf Guido Trotter
3218 642339cf Guido Trotter
  def ExpandNames(self):
3219 642339cf Guido Trotter
    self.needed_locks = {}
3220 a8083063 Iustin Pop
3221 a8083063 Iustin Pop
  def CheckPrereq(self):
3222 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
3223 a8083063 Iustin Pop

3224 a8083063 Iustin Pop
    """
3225 a8083063 Iustin Pop
    pass
3226 a8083063 Iustin Pop
3227 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3228 a8083063 Iustin Pop
    """Return cluster config.
3229 a8083063 Iustin Pop

3230 a8083063 Iustin Pop
    """
3231 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
3232 a8083063 Iustin Pop
    result = {
3233 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
3234 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
3235 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
3236 d1a7d66f Guido Trotter
      "os_api_version": max(constants.OS_API_VERSIONS),
3237 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
3238 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
3239 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
3240 469f88e1 Iustin Pop
      "master": cluster.master_node,
3241 066f465d Guido Trotter
      "default_hypervisor": cluster.enabled_hypervisors[0],
3242 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
3243 b8810fec Michael Hanselmann
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
3244 7c4d6c7b Michael Hanselmann
                        for hypervisor_name in cluster.enabled_hypervisors]),
3245 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
3246 1094acda Guido Trotter
      "nicparams": cluster.nicparams,
3247 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
3248 7a56b411 Guido Trotter
      "master_netdev": cluster.master_netdev,
3249 7a56b411 Guido Trotter
      "volume_group_name": cluster.volume_group_name,
3250 7a56b411 Guido Trotter
      "file_storage_dir": cluster.file_storage_dir,
3251 90f72445 Iustin Pop
      "ctime": cluster.ctime,
3252 90f72445 Iustin Pop
      "mtime": cluster.mtime,
3253 259578eb Iustin Pop
      "uuid": cluster.uuid,
3254 c118d1f4 Michael Hanselmann
      "tags": list(cluster.GetTags()),
3255 a8083063 Iustin Pop
      }
3256 a8083063 Iustin Pop
3257 a8083063 Iustin Pop
    return result
3258 a8083063 Iustin Pop
3259 a8083063 Iustin Pop
3260 ae5849b5 Michael Hanselmann
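# Illustrative sketch (not part of the original code): the dictionary
# built above is what cluster-info queries ultimately return to clients;
# a caller interested in only a few keys might do something like:
#
#   info = ...  # result of the query, as returned through the job queue
#   name, master = info["name"], info["master"]
#   for hv in info["enabled_hypervisors"]:
#     hvparams_for_hv = info["hvparams"][hv]
#
# All values are plain Python types, so the result serializes cleanly.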
class LUQueryConfigValues(NoHooksLU):
3261 ae5849b5 Michael Hanselmann
  """Return configuration values.
3262 a8083063 Iustin Pop

3263 a8083063 Iustin Pop
  """
3264 a8083063 Iustin Pop
  _OP_REQP = []
3265 642339cf Guido Trotter
  REQ_BGL = False
3266 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
3267 05e50653 Michael Hanselmann
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
3268 05e50653 Michael Hanselmann
                                  "watcher_pause")
3269 642339cf Guido Trotter
3270 642339cf Guido Trotter
  def ExpandNames(self):
3271 642339cf Guido Trotter
    self.needed_locks = {}
3272 a8083063 Iustin Pop
3273 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3274 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3275 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
3276 ae5849b5 Michael Hanselmann
3277 a8083063 Iustin Pop
  def CheckPrereq(self):
3278 a8083063 Iustin Pop
    """No prerequisites.
3279 a8083063 Iustin Pop

3280 a8083063 Iustin Pop
    """
3281 a8083063 Iustin Pop
    pass
3282 a8083063 Iustin Pop
3283 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3284 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
3285 a8083063 Iustin Pop

3286 a8083063 Iustin Pop
    """
3287 ae5849b5 Michael Hanselmann
    values = []
3288 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
3289 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
3290 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
3291 ae5849b5 Michael Hanselmann
      elif field == "master_node":
3292 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
3293 3ccafd0e Iustin Pop
      elif field == "drain_flag":
3294 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
3295 05e50653 Michael Hanselmann
      elif field == "watcher_pause":
3296 05e50653 Michael Hanselmann
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
3297 ae5849b5 Michael Hanselmann
      else:
3298 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
3299 3ccafd0e Iustin Pop
      values.append(entry)
3300 ae5849b5 Michael Hanselmann
    return values
3301 a8083063 Iustin Pop
3302 a8083063 Iustin Pop
3303 a8083063 Iustin Pop
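# Illustrative sketch (not part of the original code): the values are
# returned in the same order as the requested output fields, so a caller
# asking for ["cluster_name", "master_node", "drain_flag"] gets back
# something like:
#
#   ["cluster.example.com", "node1.example.com", False]
#
# Unknown field names raise errors.ParameterError, as shown above.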
class LUActivateInstanceDisks(NoHooksLU):
3304 a8083063 Iustin Pop
  """Bring up an instance's disks.
3305 a8083063 Iustin Pop

3306 a8083063 Iustin Pop
  """
3307 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3308 f22a8ba3 Guido Trotter
  REQ_BGL = False
3309 f22a8ba3 Guido Trotter
3310 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3311 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3312 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3313 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3314 f22a8ba3 Guido Trotter
3315 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3316 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3317 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3318 a8083063 Iustin Pop
3319 a8083063 Iustin Pop
  def CheckPrereq(self):
3320 a8083063 Iustin Pop
    """Check prerequisites.
3321 a8083063 Iustin Pop

3322 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3323 a8083063 Iustin Pop

3324 a8083063 Iustin Pop
    """
3325 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3326 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3327 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3328 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3329 b4ec07f8 Iustin Pop
    if not hasattr(self.op, "ignore_size"):
3330 b4ec07f8 Iustin Pop
      self.op.ignore_size = False
3331 a8083063 Iustin Pop
3332 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3333 a8083063 Iustin Pop
    """Activate the disks.
3334 a8083063 Iustin Pop

3335 a8083063 Iustin Pop
    """
3336 b4ec07f8 Iustin Pop
    disks_ok, disks_info = \
3337 b4ec07f8 Iustin Pop
              _AssembleInstanceDisks(self, self.instance,
3338 b4ec07f8 Iustin Pop
                                     ignore_size=self.op.ignore_size)
3339 a8083063 Iustin Pop
    if not disks_ok:
3340 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
3341 a8083063 Iustin Pop
3342 a8083063 Iustin Pop
    return disks_info
3343 a8083063 Iustin Pop
3344 a8083063 Iustin Pop
3345 e3443b36 Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
3346 e3443b36 Iustin Pop
                           ignore_size=False):
3347 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
3348 a8083063 Iustin Pop

3349 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
3350 a8083063 Iustin Pop

3351 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3352 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3353 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3354 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
3355 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
3356 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
3357 e4376078 Iustin Pop
      won't result in an error return from the function
3358 e3443b36 Iustin Pop
  @type ignore_size: boolean
3359 e3443b36 Iustin Pop
  @param ignore_size: if true, the current known size of the disk
3360 e3443b36 Iustin Pop
      will not be used during the disk activation, useful for cases
3361 e3443b36 Iustin Pop
      when the size is wrong
3362 e4376078 Iustin Pop
  @return: a pair (disks_ok, device_info); disks_ok is False if the
      operation failed on some node, and device_info is a list of
3363 e4376078 Iustin Pop
      (host, instance_visible_name, node_visible_name)
3364 e4376078 Iustin Pop
      with the mapping from node devices to instance devices
3365 a8083063 Iustin Pop

3366 a8083063 Iustin Pop
  """
3367 a8083063 Iustin Pop
  device_info = []
3368 a8083063 Iustin Pop
  disks_ok = True
3369 fdbd668d Iustin Pop
  iname = instance.name
3370 fdbd668d Iustin Pop
  # With the two-pass mechanism we try to reduce the window of
3371 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
3372 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
3373 fdbd668d Iustin Pop
3374 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
3375 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
3376 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
3377 fdbd668d Iustin Pop
  # SyncSource, etc.)
3378 fdbd668d Iustin Pop
3379 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
3380 a8083063 Iustin Pop
  for inst_disk in instance.disks:
3381 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3382 e3443b36 Iustin Pop
      if ignore_size:
3383 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3384 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3385 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3386 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
3387 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3388 53c14ef1 Iustin Pop
      if msg:
3389 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3390 53c14ef1 Iustin Pop
                           " (is_primary=False, pass=1): %s",
3391 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3392 fdbd668d Iustin Pop
        if not ignore_secondaries:
3393 a8083063 Iustin Pop
          disks_ok = False
3394 fdbd668d Iustin Pop
3395 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
3396 fdbd668d Iustin Pop
3397 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
3398 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
3399 d52ea991 Michael Hanselmann
    dev_path = None
3400 d52ea991 Michael Hanselmann
3401 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3402 fdbd668d Iustin Pop
      if node != instance.primary_node:
3403 fdbd668d Iustin Pop
        continue
3404 e3443b36 Iustin Pop
      if ignore_size:
3405 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3406 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3407 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3408 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
3409 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3410 53c14ef1 Iustin Pop
      if msg:
3411 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3412 53c14ef1 Iustin Pop
                           " (is_primary=True, pass=2): %s",
3413 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3414 fdbd668d Iustin Pop
        disks_ok = False
3415 d52ea991 Michael Hanselmann
      else:
3416 d52ea991 Michael Hanselmann
        dev_path = result.payload
3417 d52ea991 Michael Hanselmann
3418 d52ea991 Michael Hanselmann
    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
3419 a8083063 Iustin Pop
3420 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
3421 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
3422 b352ab5b Iustin Pop
  # improving the logical/physical id handling
3423 b352ab5b Iustin Pop
  for disk in instance.disks:
3424 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
3425 b352ab5b Iustin Pop
3426 a8083063 Iustin Pop
  return disks_ok, device_info
3427 a8083063 Iustin Pop
3428 a8083063 Iustin Pop
3429 b9bddb6b Iustin Pop
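# Illustrative sketch (not part of the original code): callers are
# expected to unpack the (disks_ok, device_info) pair and treat a False
# disks_ok as a hard error, e.g. (mirroring LUActivateInstanceDisks
# above):
#
#   disks_ok, disks_info = _AssembleInstanceDisks(lu, instance)
#   if not disks_ok:
#     raise errors.OpExecError("Cannot activate block devices")
#   for node, iv_name, dev_path in disks_info:
#     feedback_fn("  %s on %s is %s" % (iv_name, node, dev_path))
#
# where feedback_fn is the usual LU feedback callback.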
def _StartInstanceDisks(lu, instance, force):
3430 3ecf6786 Iustin Pop
  """Start the disks of an instance.
3431 3ecf6786 Iustin Pop

3432 3ecf6786 Iustin Pop
  """
3433 7c4d6c7b Michael Hanselmann
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
3434 fe7b0351 Michael Hanselmann
                                       ignore_secondaries=force)
3435 fe7b0351 Michael Hanselmann
  if not disks_ok:
3436 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
3437 fe7b0351 Michael Hanselmann
    if force is not None and not force:
3438 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
3439 86d9d3bb Iustin Pop
                         " secondary node,"
3440 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
3441 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
3442 fe7b0351 Michael Hanselmann
3443 fe7b0351 Michael Hanselmann
3444 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
3445 a8083063 Iustin Pop
  """Shutdown an instance's disks.
3446 a8083063 Iustin Pop

3447 a8083063 Iustin Pop
  """
3448 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3449 f22a8ba3 Guido Trotter
  REQ_BGL = False
3450 f22a8ba3 Guido Trotter
3451 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3452 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3453 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3454 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3455 f22a8ba3 Guido Trotter
3456 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3457 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3458 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3459 a8083063 Iustin Pop
3460 a8083063 Iustin Pop
  def CheckPrereq(self):
3461 a8083063 Iustin Pop
    """Check prerequisites.
3462 a8083063 Iustin Pop

3463 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3464 a8083063 Iustin Pop

3465 a8083063 Iustin Pop
    """
3466 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3467 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3468 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3469 a8083063 Iustin Pop
3470 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3471 a8083063 Iustin Pop
    """Deactivate the disks
3472 a8083063 Iustin Pop

3473 a8083063 Iustin Pop
    """
3474 a8083063 Iustin Pop
    instance = self.instance
3475 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
3476 a8083063 Iustin Pop
3477 a8083063 Iustin Pop
3478 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
3479 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
3480 155d6c75 Guido Trotter

3481 155d6c75 Guido Trotter
  This function checks if an instance is running, before calling
3482 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
3483 155d6c75 Guido Trotter

3484 155d6c75 Guido Trotter
  """
3485 aca13712 Iustin Pop
  pnode = instance.primary_node
3486 4c4e4e1e Iustin Pop
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
3487 4c4e4e1e Iustin Pop
  ins_l.Raise("Can't contact node %s" % pnode)
3488 aca13712 Iustin Pop
3489 aca13712 Iustin Pop
  if instance.name in ins_l.payload:
3490 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
3491 155d6c75 Guido Trotter
                             " block devices.")
3492 155d6c75 Guido Trotter
3493 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
3494 a8083063 Iustin Pop
3495 a8083063 Iustin Pop
3496 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
3497 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
3498 a8083063 Iustin Pop

3499 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
3500 a8083063 Iustin Pop

3501 a8083063 Iustin Pop
  Errors on the primary node are ignored only if ignore_primary is
  true; errors on any other node always mark the result as failed.
3503 a8083063 Iustin Pop

3504 a8083063 Iustin Pop
  """
3505 cacfd1fd Iustin Pop
  all_result = True
3506 a8083063 Iustin Pop
  for disk in instance.disks:
3507 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
3508 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
3509 781de953 Iustin Pop
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
3510 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3511 cacfd1fd Iustin Pop
      if msg:
3512 cacfd1fd Iustin Pop
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
3513 cacfd1fd Iustin Pop
                      disk.iv_name, node, msg)
3514 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
3515 cacfd1fd Iustin Pop
          all_result = False
3516 cacfd1fd Iustin Pop
  return all_result
3517 a8083063 Iustin Pop
3518 a8083063 Iustin Pop
3519 9ca87a96 Iustin Pop
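# Illustrative note (not part of the original code): the two shutdown
# helpers above differ only in their safety check.
# _SafeShutdownInstanceDisks first asks the primary node whether the
# instance is still running and refuses to tear down the disks if it is,
# while _ShutdownInstanceDisks unconditionally shuts the block devices
# down on every node; a typical cautious caller therefore does:
#
#   _SafeShutdownInstanceDisks(self, instance)
#
# and only uses _ShutdownInstanceDisks directly when it has itself just
# stopped the instance (as LUShutdownInstance does below).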
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
3520 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
3521 d4f16fd9 Iustin Pop

3522 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
3523 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
3524 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
3525 d4f16fd9 Iustin Pop
  exception.
3526 d4f16fd9 Iustin Pop

3527 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
3528 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
3529 e69d05fd Iustin Pop
  @type node: C{str}
3530 e69d05fd Iustin Pop
  @param node: the node to check
3531 e69d05fd Iustin Pop
  @type reason: C{str}
3532 e69d05fd Iustin Pop
  @param reason: string to use in the error message
3533 e69d05fd Iustin Pop
  @type requested: C{int}
3534 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
3535 9ca87a96 Iustin Pop
  @type hypervisor_name: C{str}
3536 9ca87a96 Iustin Pop
  @param hypervisor_name: the hypervisor to ask for memory stats
3537 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
3538 e69d05fd Iustin Pop
      we cannot check the node
3539 d4f16fd9 Iustin Pop

3540 d4f16fd9 Iustin Pop
  """
3541 9ca87a96 Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
3542 045dd6d9 Iustin Pop
  nodeinfo[node].Raise("Can't get data from node %s" % node,
3543 045dd6d9 Iustin Pop
                       prereq=True, ecode=errors.ECODE_ENVIRON)
3544 070e998b Iustin Pop
  free_mem = nodeinfo[node].payload.get('memory_free', None)
3545 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
3546 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
3547 5c983ee5 Iustin Pop
                               " was '%s'" % (node, free_mem),
3548 5c983ee5 Iustin Pop
                               errors.ECODE_ENVIRON)
3549 d4f16fd9 Iustin Pop
  if requested > free_mem:
3550 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
3551 070e998b Iustin Pop
                               " needed %s MiB, available %s MiB" %
3552 5c983ee5 Iustin Pop
                               (node, reason, requested, free_mem),
3553 5c983ee5 Iustin Pop
                               errors.ECODE_NORES)
3554 d4f16fd9 Iustin Pop
3555 d4f16fd9 Iustin Pop
3556 a8083063 Iustin Pop
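# Illustrative sketch (not part of the original code): a typical call,
# as used by the instance start/failover paths, checks the target node
# for the instance's configured memory before doing anything disruptive
# (the reason string below is just an example):
#
#   _CheckNodeFreeMemory(self, target_node,
#                        "failing over instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)
#
# where bep is the filled beparams dict of the instance; on failure this
# raises OpPrereqError with ECODE_NORES (not enough memory) or
# ECODE_ENVIRON (could not get node data).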
class LUStartupInstance(LogicalUnit):
3557 a8083063 Iustin Pop
  """Starts an instance.
3558 a8083063 Iustin Pop

3559 a8083063 Iustin Pop
  """
3560 a8083063 Iustin Pop
  HPATH = "instance-start"
3561 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3562 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
3563 e873317a Guido Trotter
  REQ_BGL = False
3564 e873317a Guido Trotter
3565 e873317a Guido Trotter
  def ExpandNames(self):
3566 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3567 a8083063 Iustin Pop
3568 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3569 a8083063 Iustin Pop
    """Build hooks env.
3570 a8083063 Iustin Pop

3571 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3572 a8083063 Iustin Pop

3573 a8083063 Iustin Pop
    """
3574 a8083063 Iustin Pop
    env = {
3575 a8083063 Iustin Pop
      "FORCE": self.op.force,
3576 a8083063 Iustin Pop
      }
3577 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3578 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3579 a8083063 Iustin Pop
    return env, nl, nl
3580 a8083063 Iustin Pop
3581 a8083063 Iustin Pop
  def CheckPrereq(self):
3582 a8083063 Iustin Pop
    """Check prerequisites.
3583 a8083063 Iustin Pop

3584 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3585 a8083063 Iustin Pop

3586 a8083063 Iustin Pop
    """
3587 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3588 e873317a Guido Trotter
    assert self.instance is not None, \
3589 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3590 a8083063 Iustin Pop
3591 d04aaa2f Iustin Pop
    # extra beparams
3592 d04aaa2f Iustin Pop
    self.beparams = getattr(self.op, "beparams", {})
3593 d04aaa2f Iustin Pop
    if self.beparams:
3594 d04aaa2f Iustin Pop
      if not isinstance(self.beparams, dict):
3595 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
3596 5c983ee5 Iustin Pop
                                   " dict" % (type(self.beparams), ),
3597 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3598 d04aaa2f Iustin Pop
      # fill the beparams dict
3599 d04aaa2f Iustin Pop
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
3600 d04aaa2f Iustin Pop
      self.op.beparams = self.beparams
3601 d04aaa2f Iustin Pop
3602 d04aaa2f Iustin Pop
    # extra hvparams
3603 d04aaa2f Iustin Pop
    self.hvparams = getattr(self.op, "hvparams", {})
3604 d04aaa2f Iustin Pop
    if self.hvparams:
3605 d04aaa2f Iustin Pop
      if not isinstance(self.hvparams, dict):
3606 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
3607 5c983ee5 Iustin Pop
                                   " dict" % (type(self.hvparams), ),
3608 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3609 d04aaa2f Iustin Pop
3610 d04aaa2f Iustin Pop
      # check hypervisor parameter syntax (locally)
3611 d04aaa2f Iustin Pop
      cluster = self.cfg.GetClusterInfo()
3612 d04aaa2f Iustin Pop
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
3613 abe609b2 Guido Trotter
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
3614 d04aaa2f Iustin Pop
                                    instance.hvparams)
3615 d04aaa2f Iustin Pop
      filled_hvp.update(self.hvparams)
3616 d04aaa2f Iustin Pop
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
3617 d04aaa2f Iustin Pop
      hv_type.CheckParameterSyntax(filled_hvp)
3618 d04aaa2f Iustin Pop
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
3619 d04aaa2f Iustin Pop
      self.op.hvparams = self.hvparams
3620 d04aaa2f Iustin Pop
3621 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3622 7527a8a4 Iustin Pop
3623 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
3624 5bbd3f7f Michael Hanselmann
    # check bridges existence
3625 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
3626 a8083063 Iustin Pop
3627 f1926756 Guido Trotter
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3628 f1926756 Guido Trotter
                                              instance.name,
3629 f1926756 Guido Trotter
                                              instance.hypervisor)
3630 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3631 045dd6d9 Iustin Pop
                      prereq=True, ecode=errors.ECODE_ENVIRON)
3632 7ad1af4a Iustin Pop
    if not remote_info.payload: # not running already
3633 f1926756 Guido Trotter
      _CheckNodeFreeMemory(self, instance.primary_node,
3634 f1926756 Guido Trotter
                           "starting instance %s" % instance.name,
3635 f1926756 Guido Trotter
                           bep[constants.BE_MEMORY], instance.hypervisor)
3636 d4f16fd9 Iustin Pop
3637 a8083063 Iustin Pop
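  # Illustrative note (not part of the original code): the hvparams
  # handling above is a three-level overlay: cluster-level defaults are
  # first overridden by the instance's own hvparams (objects.FillDict),
  # and the one-off parameters passed to this start request are applied
  # on top, e.g. for a KVM instance (example values only):
  #
  #   cluster.hvparams["kvm"]  -> {"kernel_path": "/boot/vmlinuz", ...}
  #   instance.hvparams        -> {"boot_order": "disk"}
  #   self.hvparams (this op)  -> {"boot_order": "cdrom"}
  #   filled_hvp               -> cluster defaults + boot_order="cdrom"
  #
  # The merged dict is then syntax-checked locally and on the nodes via
  # _CheckHVParams before the instance is actually started.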
  def Exec(self, feedback_fn):
3638 a8083063 Iustin Pop
    """Start the instance.
3639 a8083063 Iustin Pop

3640 a8083063 Iustin Pop
    """
3641 a8083063 Iustin Pop
    instance = self.instance
3642 a8083063 Iustin Pop
    force = self.op.force
3643 a8083063 Iustin Pop
3644 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
3645 fe482621 Iustin Pop
3646 a8083063 Iustin Pop
    node_current = instance.primary_node
3647 a8083063 Iustin Pop
3648 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
3649 a8083063 Iustin Pop
3650 d04aaa2f Iustin Pop
    result = self.rpc.call_instance_start(node_current, instance,
3651 d04aaa2f Iustin Pop
                                          self.hvparams, self.beparams)
3652 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3653 dd279568 Iustin Pop
    if msg:
3654 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3655 dd279568 Iustin Pop
      raise errors.OpExecError("Could not start instance: %s" % msg)
3656 a8083063 Iustin Pop
3657 a8083063 Iustin Pop
3658 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
3659 bf6929a2 Alexander Schreiber
  """Reboot an instance.
3660 bf6929a2 Alexander Schreiber

3661 bf6929a2 Alexander Schreiber
  """
3662 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
3663 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
3664 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
3665 e873317a Guido Trotter
  REQ_BGL = False
3666 e873317a Guido Trotter
3667 17c3f802 Guido Trotter
  def CheckArguments(self):
3668 17c3f802 Guido Trotter
    """Check the arguments.
3669 17c3f802 Guido Trotter

3670 17c3f802 Guido Trotter
    """
3671 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
3672 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
3673 17c3f802 Guido Trotter
3674 e873317a Guido Trotter
  def ExpandNames(self):
3675 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
3676 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
3677 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
3678 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
3679 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
3680 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
3681 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
3682 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3683 bf6929a2 Alexander Schreiber
3684 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
3685 bf6929a2 Alexander Schreiber
    """Build hooks env.
3686 bf6929a2 Alexander Schreiber

3687 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
3688 bf6929a2 Alexander Schreiber

3689 bf6929a2 Alexander Schreiber
    """
3690 bf6929a2 Alexander Schreiber
    env = {
3691 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
3692 2c2690c9 Iustin Pop
      "REBOOT_TYPE": self.op.reboot_type,
3693 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
3694 bf6929a2 Alexander Schreiber
      }
3695 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3696 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3697 bf6929a2 Alexander Schreiber
    return env, nl, nl
3698 bf6929a2 Alexander Schreiber
3699 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
3700 bf6929a2 Alexander Schreiber
    """Check prerequisites.
3701 bf6929a2 Alexander Schreiber

3702 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
3703 bf6929a2 Alexander Schreiber

3704 bf6929a2 Alexander Schreiber
    """
3705 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3706 e873317a Guido Trotter
    assert self.instance is not None, \
3707 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3708 bf6929a2 Alexander Schreiber
3709 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3710 7527a8a4 Iustin Pop
3711 5bbd3f7f Michael Hanselmann
    # check bridges existence
3712 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
3713 bf6929a2 Alexander Schreiber
3714 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
3715 bf6929a2 Alexander Schreiber
    """Reboot the instance.
3716 bf6929a2 Alexander Schreiber

3717 bf6929a2 Alexander Schreiber
    """
3718 bf6929a2 Alexander Schreiber
    instance = self.instance
3719 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
3720 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
3721 bf6929a2 Alexander Schreiber
3722 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
3723 bf6929a2 Alexander Schreiber
3724 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
3725 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
3726 ae48ac32 Iustin Pop
      for disk in instance.disks:
3727 ae48ac32 Iustin Pop
        self.cfg.SetDiskID(disk, node_current)
3728 781de953 Iustin Pop
      result = self.rpc.call_instance_reboot(node_current, instance,
3729 17c3f802 Guido Trotter
                                             reboot_type,
3730 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
3731 4c4e4e1e Iustin Pop
      result.Raise("Could not reboot instance")
3732 bf6929a2 Alexander Schreiber
    else:
3733 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(node_current, instance,
3734 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
3735 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance for full reboot")
3736 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3737 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
3738 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(node_current, instance, None, None)
3739 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3740 dd279568 Iustin Pop
      if msg:
3741 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3742 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance for"
3743 dd279568 Iustin Pop
                                 " full reboot: %s" % msg)
3744 bf6929a2 Alexander Schreiber
3745 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
3746 bf6929a2 Alexander Schreiber
3747 bf6929a2 Alexander Schreiber
3748 a8083063 Iustin Pop
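# Illustrative note (not part of the original code): the three reboot
# types accepted by this LU map, roughly, to increasingly heavy actions:
#
#   INSTANCE_REBOOT_SOFT - reboot the instance from inside the hypervisor
#   INSTANCE_REBOOT_HARD - forcibly restart the instance at the
#                          hypervisor level (both handled by
#                          call_instance_reboot above)
#   INSTANCE_REBOOT_FULL - shut the instance down, re-assemble its disks
#                          and start it again from this LU
#
# Only the "full" variant goes through _ShutdownInstanceDisks /
# _StartInstanceDisks, which is why it is the only one that honours
# ignore_secondaries.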
class LUShutdownInstance(LogicalUnit):
3749 a8083063 Iustin Pop
  """Shutdown an instance.
3750 a8083063 Iustin Pop

3751 a8083063 Iustin Pop
  """
3752 a8083063 Iustin Pop
  HPATH = "instance-stop"
3753 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3754 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3755 e873317a Guido Trotter
  REQ_BGL = False
3756 e873317a Guido Trotter
3757 6263189c Guido Trotter
  def CheckArguments(self):
3758 6263189c Guido Trotter
    """Check the arguments.
3759 6263189c Guido Trotter

3760 6263189c Guido Trotter
    """
3761 6263189c Guido Trotter
    self.timeout = getattr(self.op, "timeout",
3762 6263189c Guido Trotter
                           constants.DEFAULT_SHUTDOWN_TIMEOUT)
3763 6263189c Guido Trotter
3764 e873317a Guido Trotter
  def ExpandNames(self):
3765 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3766 a8083063 Iustin Pop
3767 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3768 a8083063 Iustin Pop
    """Build hooks env.
3769 a8083063 Iustin Pop

3770 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3771 a8083063 Iustin Pop

3772 a8083063 Iustin Pop
    """
3773 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3774 6263189c Guido Trotter
    env["TIMEOUT"] = self.timeout
3775 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3776 a8083063 Iustin Pop
    return env, nl, nl
3777 a8083063 Iustin Pop
3778 a8083063 Iustin Pop
  def CheckPrereq(self):
3779 a8083063 Iustin Pop
    """Check prerequisites.
3780 a8083063 Iustin Pop

3781 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3782 a8083063 Iustin Pop

3783 a8083063 Iustin Pop
    """
3784 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3785 e873317a Guido Trotter
    assert self.instance is not None, \
3786 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3787 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3788 a8083063 Iustin Pop
3789 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3790 a8083063 Iustin Pop
    """Shutdown the instance.
3791 a8083063 Iustin Pop

3792 a8083063 Iustin Pop
    """
3793 a8083063 Iustin Pop
    instance = self.instance
3794 a8083063 Iustin Pop
    node_current = instance.primary_node
3795 6263189c Guido Trotter
    timeout = self.timeout
3796 fe482621 Iustin Pop
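    # flip the admin state in the configuration first, so that the watcher
    # does not try to restart an instance we are deliberately stopping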
    self.cfg.MarkInstanceDown(instance.name)
3797 6263189c Guido Trotter
    result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
3798 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3799 1fae010f Iustin Pop
    if msg:
3800 1fae010f Iustin Pop
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
3801 a8083063 Iustin Pop
3802 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
3803 a8083063 Iustin Pop
3804 a8083063 Iustin Pop
3805 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
3806 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
3807 fe7b0351 Michael Hanselmann

3808 fe7b0351 Michael Hanselmann
  """
3809 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
3810 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
3811 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
3812 4e0b4d2d Guido Trotter
  REQ_BGL = False
3813 4e0b4d2d Guido Trotter
3814 4e0b4d2d Guido Trotter
  def ExpandNames(self):
3815 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
3816 fe7b0351 Michael Hanselmann
3817 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
3818 fe7b0351 Michael Hanselmann
    """Build hooks env.
3819 fe7b0351 Michael Hanselmann

3820 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
3821 fe7b0351 Michael Hanselmann

3822 fe7b0351 Michael Hanselmann
    """
3823 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3824 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3825 fe7b0351 Michael Hanselmann
    return env, nl, nl
3826 fe7b0351 Michael Hanselmann
3827 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
3828 fe7b0351 Michael Hanselmann
    """Check prerequisites.
3829 fe7b0351 Michael Hanselmann

3830 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
3831 fe7b0351 Michael Hanselmann

3832 fe7b0351 Michael Hanselmann
    """
3833 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3834 4e0b4d2d Guido Trotter
    assert instance is not None, \
3835 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3836 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3837 4e0b4d2d Guido Trotter
3838 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
3839 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
3840 5c983ee5 Iustin Pop
                                 self.op.instance_name,
3841 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3842 0d68c45d Iustin Pop
    if instance.admin_up:
3843 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3844 5c983ee5 Iustin Pop
                                 self.op.instance_name,
3845 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
3846 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3847 72737a7f Iustin Pop
                                              instance.name,
3848 72737a7f Iustin Pop
                                              instance.hypervisor)
3849 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3850 045dd6d9 Iustin Pop
                      prereq=True, ecode=errors.ECODE_ENVIRON)
3851 7ad1af4a Iustin Pop
    if remote_info.payload:
3852 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3853 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
3854 5c983ee5 Iustin Pop
                                  instance.primary_node),
3855 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
3856 d0834de3 Michael Hanselmann
3857 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
3858 f2c05717 Guido Trotter
    self.op.force_variant = getattr(self.op, "force_variant", False)
3859 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3860 d0834de3 Michael Hanselmann
      # OS verification
3861 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
3862 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
3863 d0834de3 Michael Hanselmann
      if pnode is None:
3864 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
3865 5c983ee5 Iustin Pop
                                   instance.primary_node, errors.ECODE_NOENT)
3866 781de953 Iustin Pop
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
3867 4c4e4e1e Iustin Pop
      result.Raise("OS '%s' not in supported OS list for primary node %s" %
3868 045dd6d9 Iustin Pop
                   (self.op.os_type, pnode.name),
3869 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_INVAL)
3870 f2c05717 Guido Trotter
      if not self.op.force_variant:
3871 f2c05717 Guido Trotter
        _CheckOSVariant(result.payload, self.op.os_type)
3872 d0834de3 Michael Hanselmann
3873 fe7b0351 Michael Hanselmann
    self.instance = instance
3874 fe7b0351 Michael Hanselmann
3875 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
3876 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
3877 fe7b0351 Michael Hanselmann

3878 fe7b0351 Michael Hanselmann
    """
3879 fe7b0351 Michael Hanselmann
    inst = self.instance
3880 fe7b0351 Michael Hanselmann
3881 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3882 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
3883 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
3884 a4eae71f Michael Hanselmann
      self.cfg.Update(inst, feedback_fn)
3885 d0834de3 Michael Hanselmann
3886 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
3887 fe7b0351 Michael Hanselmann
    try:
3888 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
3889 e557bae9 Guido Trotter
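      # the last argument marks this as a reinstall of an existing instance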
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
3890 4c4e4e1e Iustin Pop
      result.Raise("Could not install OS for instance %s on node %s" %
3891 4c4e4e1e Iustin Pop
                   (inst.name, inst.primary_node))
3892 fe7b0351 Michael Hanselmann
    finally:
3893 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
3894 fe7b0351 Michael Hanselmann
3895 fe7b0351 Michael Hanselmann
3896 bd315bfa Iustin Pop
class LURecreateInstanceDisks(LogicalUnit):
3897 bd315bfa Iustin Pop
  """Recreate an instance's missing disks.
3898 bd315bfa Iustin Pop

3899 bd315bfa Iustin Pop
  """
3900 bd315bfa Iustin Pop
  HPATH = "instance-recreate-disks"
3901 bd315bfa Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3902 bd315bfa Iustin Pop
  _OP_REQP = ["instance_name", "disks"]
3903 bd315bfa Iustin Pop
  REQ_BGL = False
3904 bd315bfa Iustin Pop
3905 bd315bfa Iustin Pop
  def CheckArguments(self):
3906 bd315bfa Iustin Pop
    """Check the arguments.
3907 bd315bfa Iustin Pop

3908 bd315bfa Iustin Pop
    """
3909 bd315bfa Iustin Pop
    if not isinstance(self.op.disks, list):
3910 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid disks parameter", errors.ECODE_INVAL)
3911 bd315bfa Iustin Pop
    for item in self.op.disks:
3912 bd315bfa Iustin Pop
      if (not isinstance(item, int) or
3913 bd315bfa Iustin Pop
          item < 0):
3914 bd315bfa Iustin Pop
        raise errors.OpPrereqError("Invalid disk specification '%s'" %
3915 5c983ee5 Iustin Pop
                                   str(item), errors.ECODE_INVAL)
3916 bd315bfa Iustin Pop
3917 bd315bfa Iustin Pop
  def ExpandNames(self):
3918 bd315bfa Iustin Pop
    self._ExpandAndLockInstance()
3919 bd315bfa Iustin Pop
3920 bd315bfa Iustin Pop
  def BuildHooksEnv(self):
3921 bd315bfa Iustin Pop
    """Build hooks env.
3922 bd315bfa Iustin Pop

3923 bd315bfa Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3924 bd315bfa Iustin Pop

3925 bd315bfa Iustin Pop
    """
3926 bd315bfa Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3927 bd315bfa Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3928 bd315bfa Iustin Pop
    return env, nl, nl
3929 bd315bfa Iustin Pop
3930 bd315bfa Iustin Pop
  def CheckPrereq(self):
3931 bd315bfa Iustin Pop
    """Check prerequisites.
3932 bd315bfa Iustin Pop

3933 bd315bfa Iustin Pop
    This checks that the instance is in the cluster and is not running.
3934 bd315bfa Iustin Pop

3935 bd315bfa Iustin Pop
    """
3936 bd315bfa Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3937 bd315bfa Iustin Pop
    assert instance is not None, \
3938 bd315bfa Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
3939 bd315bfa Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3940 bd315bfa Iustin Pop
3941 bd315bfa Iustin Pop
    if instance.disk_template == constants.DT_DISKLESS:
3942 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
3943 5c983ee5 Iustin Pop
                                 self.op.instance_name, errors.ECODE_INVAL)
3944 bd315bfa Iustin Pop
    if instance.admin_up:
3945 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3946 5c983ee5 Iustin Pop
                                 self.op.instance_name, errors.ECODE_STATE)
3947 bd315bfa Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3948 bd315bfa Iustin Pop
                                              instance.name,
3949 bd315bfa Iustin Pop
                                              instance.hypervisor)
3950 bd315bfa Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3951 045dd6d9 Iustin Pop
                      prereq=True, ecode=errors.ECODE_ENVIRON)
3952 bd315bfa Iustin Pop
    if remote_info.payload:
3953 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3954 bd315bfa Iustin Pop
                                 (self.op.instance_name,
3955 5c983ee5 Iustin Pop
                                  instance.primary_node), errors.ECODE_STATE)
3956 bd315bfa Iustin Pop
3957 bd315bfa Iustin Pop
    if not self.op.disks:
3958 bd315bfa Iustin Pop
      self.op.disks = range(len(instance.disks))
3959 bd315bfa Iustin Pop
    else:
3960 bd315bfa Iustin Pop
      for idx in self.op.disks:
3961 bd315bfa Iustin Pop
        if idx >= len(instance.disks):
3962 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
3963 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
3964 bd315bfa Iustin Pop
3965 bd315bfa Iustin Pop
    self.instance = instance
3966 bd315bfa Iustin Pop
3967 bd315bfa Iustin Pop
  def Exec(self, feedback_fn):
3968 bd315bfa Iustin Pop
    """Recreate the disks.
3969 bd315bfa Iustin Pop

3970 bd315bfa Iustin Pop
    """
3971 bd315bfa Iustin Pop
    to_skip = []
3972 bd315bfa Iustin Pop
    for idx, disk in enumerate(self.instance.disks):
3973 bd315bfa Iustin Pop
      if idx not in self.op.disks: # disk idx has not been passed in
3974 bd315bfa Iustin Pop
        to_skip.append(idx)
3975 bd315bfa Iustin Pop
        continue
3976 bd315bfa Iustin Pop
3977 bd315bfa Iustin Pop
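    # recreate every disk except the indices collected in to_skip above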
    _CreateDisks(self, self.instance, to_skip=to_skip)
3978 bd315bfa Iustin Pop
3979 bd315bfa Iustin Pop
3980 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
3981 decd5f45 Iustin Pop
  """Rename an instance.
3982 decd5f45 Iustin Pop

3983 decd5f45 Iustin Pop
  """
3984 decd5f45 Iustin Pop
  HPATH = "instance-rename"
3985 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3986 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
3987 decd5f45 Iustin Pop
3988 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
3989 decd5f45 Iustin Pop
    """Build hooks env.
3990 decd5f45 Iustin Pop

3991 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3992 decd5f45 Iustin Pop

3993 decd5f45 Iustin Pop
    """
3994 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3995 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
3996 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3997 decd5f45 Iustin Pop
    return env, nl, nl
3998 decd5f45 Iustin Pop
3999 decd5f45 Iustin Pop
  def CheckPrereq(self):
4000 decd5f45 Iustin Pop
    """Check prerequisites.
4001 decd5f45 Iustin Pop

4002 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
4003 decd5f45 Iustin Pop

4004 decd5f45 Iustin Pop
    """
4005 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
4006 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
4007 decd5f45 Iustin Pop
    if instance is None:
4008 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
4009 5c983ee5 Iustin Pop
                                 self.op.instance_name, errors.ECODE_NOENT)
4010 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4011 7527a8a4 Iustin Pop
4012 0d68c45d Iustin Pop
    if instance.admin_up:
4013 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
4014 5c983ee5 Iustin Pop
                                 self.op.instance_name, errors.ECODE_STATE)
4015 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
4016 72737a7f Iustin Pop
                                              instance.name,
4017 72737a7f Iustin Pop
                                              instance.hypervisor)
4018 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
4019 045dd6d9 Iustin Pop
                      prereq=True, ecode=errors.ECODE_ENVIRON)
4020 7ad1af4a Iustin Pop
    if remote_info.payload:
4021 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
4022 decd5f45 Iustin Pop
                                 (self.op.instance_name,
4023 5c983ee5 Iustin Pop
                                  instance.primary_node), errors.ECODE_STATE)
4024 decd5f45 Iustin Pop
    self.instance = instance
4025 decd5f45 Iustin Pop
4026 decd5f45 Iustin Pop
    # new name verification
4027 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
4028 decd5f45 Iustin Pop
4029 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
4030 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
4031 7bde3275 Guido Trotter
    if new_name in instance_list:
4032 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4033 5c983ee5 Iustin Pop
                                 new_name, errors.ECODE_EXISTS)
4034 7bde3275 Guido Trotter
4035 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
4036 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
4037 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4038 5c983ee5 Iustin Pop
                                   (name_info.ip, new_name),
4039 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
4040 decd5f45 Iustin Pop
4041 decd5f45 Iustin Pop
4042 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
4043 decd5f45 Iustin Pop
    """Reinstall the instance.
4044 decd5f45 Iustin Pop

4045 decd5f45 Iustin Pop
    """
4046 decd5f45 Iustin Pop
    inst = self.instance
4047 decd5f45 Iustin Pop
    old_name = inst.name
4048 decd5f45 Iustin Pop
4049 b23c4333 Manuel Franceschini
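    # remember the current file storage directory, as the configuration
    # rename below also updates the paths of file-based disks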
    if inst.disk_template == constants.DT_FILE:
4050 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4051 b23c4333 Manuel Franceschini
4052 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
4053 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
4054 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
4055 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
4056 decd5f45 Iustin Pop
4057 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
4058 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
4059 decd5f45 Iustin Pop
4060 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
4061 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4062 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
4063 72737a7f Iustin Pop
                                                     old_file_storage_dir,
4064 72737a7f Iustin Pop
                                                     new_file_storage_dir)
4065 4c4e4e1e Iustin Pop
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
4066 4c4e4e1e Iustin Pop
                   " (but the instance has been renamed in Ganeti)" %
4067 4c4e4e1e Iustin Pop
                   (inst.primary_node, old_file_storage_dir,
4068 4c4e4e1e Iustin Pop
                    new_file_storage_dir))
4069 b23c4333 Manuel Franceschini
4070 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
4071 decd5f45 Iustin Pop
    try:
4072 781de953 Iustin Pop
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
4073 781de953 Iustin Pop
                                                 old_name)
4074 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4075 96841384 Iustin Pop
      if msg:
4076 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
4077 96841384 Iustin Pop
               " (but the instance has been renamed in Ganeti): %s" %
4078 96841384 Iustin Pop
               (inst.name, inst.primary_node, msg))
4079 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
4080 decd5f45 Iustin Pop
    finally:
4081 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
4082 decd5f45 Iustin Pop
4083 decd5f45 Iustin Pop
4084 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
4085 a8083063 Iustin Pop
  """Remove an instance.
4086 a8083063 Iustin Pop

4087 a8083063 Iustin Pop
  """
4088 a8083063 Iustin Pop
  HPATH = "instance-remove"
4089 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4090 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
4091 cf472233 Guido Trotter
  REQ_BGL = False
4092 cf472233 Guido Trotter
4093 17c3f802 Guido Trotter
  def CheckArguments(self):
4094 17c3f802 Guido Trotter
    """Check the arguments.
4095 17c3f802 Guido Trotter

4096 17c3f802 Guido Trotter
    """
4097 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4098 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4099 17c3f802 Guido Trotter
4100 cf472233 Guido Trotter
  def ExpandNames(self):
4101 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
4102 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4103 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4104 cf472233 Guido Trotter
4105 cf472233 Guido Trotter
  def DeclareLocks(self, level):
4106 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
4107 cf472233 Guido Trotter
      self._LockInstancesNodes()
4108 a8083063 Iustin Pop
4109 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4110 a8083063 Iustin Pop
    """Build hooks env.
4111 a8083063 Iustin Pop

4112 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4113 a8083063 Iustin Pop

4114 a8083063 Iustin Pop
    """
4115 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4116 17c3f802 Guido Trotter
    env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
4117 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
4118 a8083063 Iustin Pop
    return env, nl, nl
4119 a8083063 Iustin Pop
4120 a8083063 Iustin Pop
  def CheckPrereq(self):
4121 a8083063 Iustin Pop
    """Check prerequisites.
4122 a8083063 Iustin Pop

4123 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4124 a8083063 Iustin Pop

4125 a8083063 Iustin Pop
    """
4126 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4127 cf472233 Guido Trotter
    assert self.instance is not None, \
4128 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4129 a8083063 Iustin Pop
4130 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4131 a8083063 Iustin Pop
    """Remove the instance.
4132 a8083063 Iustin Pop

4133 a8083063 Iustin Pop
    """
4134 a8083063 Iustin Pop
    instance = self.instance
4135 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
4136 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
4137 a8083063 Iustin Pop
4138 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
4139 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4140 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4141 1fae010f Iustin Pop
    if msg:
4142 1d67656e Iustin Pop
      if self.op.ignore_failures:
4143 1fae010f Iustin Pop
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
4144 1d67656e Iustin Pop
      else:
4145 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4146 1fae010f Iustin Pop
                                 " node %s: %s" %
4147 1fae010f Iustin Pop
                                 (instance.name, instance.primary_node, msg))
4148 a8083063 Iustin Pop
4149 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
4150 a8083063 Iustin Pop
4151 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
4152 1d67656e Iustin Pop
      if self.op.ignore_failures:
4153 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
4154 1d67656e Iustin Pop
      else:
4155 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
4156 a8083063 Iustin Pop
4157 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
4158 a8083063 Iustin Pop
4159 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
4160 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
4161 a8083063 Iustin Pop
4162 a8083063 Iustin Pop
4163 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
4164 a8083063 Iustin Pop
  """Logical unit for querying instances.
4165 a8083063 Iustin Pop

4166 a8083063 Iustin Pop
  """
4167 ec79568d Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
4168 7eb9d8f7 Guido Trotter
  REQ_BGL = False
4169 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
4170 19bed813 Iustin Pop
                    "serial_no", "ctime", "mtime", "uuid"]
4171 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
4172 5b460366 Iustin Pop
                                    "admin_state",
4173 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
4174 638c6349 Guido Trotter
                                    "nic_mode", "nic_link",
4175 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
4176 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
4177 8aec325c Iustin Pop
                                    r"(disk)\.(size)/([0-9]+)",
4178 8aec325c Iustin Pop
                                    r"(disk)\.(sizes)", "disk_usage",
4179 638c6349 Guido Trotter
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
4180 638c6349 Guido Trotter
                                    r"(nic)\.(bridge)/([0-9]+)",
4181 638c6349 Guido Trotter
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
4182 8aec325c Iustin Pop
                                    r"(disk|nic)\.(count)",
4183 19bed813 Iustin Pop
                                    "hvparams",
4184 19bed813 Iustin Pop
                                    ] + _SIMPLE_FIELDS +
4185 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
4186 a2d2e1a7 Iustin Pop
                                   for name in constants.HVS_PARAMETERS] +
4187 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
4188 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
4189 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
4190 31bf511f Iustin Pop
4191 a8083063 Iustin Pop
4192 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
4193 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
4194 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
4195 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
4196 a8083063 Iustin Pop
4197 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
4198 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
4199 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
4200 7eb9d8f7 Guido Trotter
4201 57a2fb91 Iustin Pop
    if self.op.names:
4202 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
4203 7eb9d8f7 Guido Trotter
    else:
4204 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
4205 7eb9d8f7 Guido Trotter
4206 ec79568d Iustin Pop
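    # the dynamic fields require RPC calls to the nodes; locks are only
    # taken when such fields are requested and the caller allows locking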
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
4207 ec79568d Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
4208 57a2fb91 Iustin Pop
    if self.do_locking:
4209 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
4210 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
4211 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4212 7eb9d8f7 Guido Trotter
4213 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
4214 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
4215 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
4216 7eb9d8f7 Guido Trotter
4217 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
4218 7eb9d8f7 Guido Trotter
    """Check prerequisites.
4219 7eb9d8f7 Guido Trotter

4220 7eb9d8f7 Guido Trotter
    """
4221 57a2fb91 Iustin Pop
    pass
4222 069dcc86 Iustin Pop
4223 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4224 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
4225 a8083063 Iustin Pop

4226 a8083063 Iustin Pop
    """
4227 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
4228 a7f5dc98 Iustin Pop
    if self.wanted == locking.ALL_SET:
4229 a7f5dc98 Iustin Pop
      # caller didn't specify instance names, so ordering is not important
4230 a7f5dc98 Iustin Pop
      if self.do_locking:
4231 a7f5dc98 Iustin Pop
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4232 a7f5dc98 Iustin Pop
      else:
4233 a7f5dc98 Iustin Pop
        instance_names = all_info.keys()
4234 a7f5dc98 Iustin Pop
      instance_names = utils.NiceSort(instance_names)
4235 57a2fb91 Iustin Pop
    else:
4236 a7f5dc98 Iustin Pop
      # caller did specify names, so we must keep the ordering
4237 a7f5dc98 Iustin Pop
      if self.do_locking:
4238 a7f5dc98 Iustin Pop
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
4239 a7f5dc98 Iustin Pop
      else:
4240 a7f5dc98 Iustin Pop
        tgt_set = all_info.keys()
4241 a7f5dc98 Iustin Pop
      missing = set(self.wanted).difference(tgt_set)
4242 a7f5dc98 Iustin Pop
      if missing:
4243 a7f5dc98 Iustin Pop
        raise errors.OpExecError("Some instances were removed before"
4244 a7f5dc98 Iustin Pop
                                 " retrieving their data: %s" % missing)
4245 a7f5dc98 Iustin Pop
      instance_names = self.wanted
4246 c1f1cbb2 Iustin Pop
4247 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
4248 a8083063 Iustin Pop
4249 a8083063 Iustin Pop
    # begin data gathering
4250 a8083063 Iustin Pop
4251 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
4252 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
4253 a8083063 Iustin Pop
4254 a8083063 Iustin Pop
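    # bad_nodes collects nodes whose RPC call failed, off_nodes those marked
    # offline; instances on them are reported accordingly below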
    bad_nodes = []
4255 cbfc4681 Iustin Pop
    off_nodes = []
4256 ec79568d Iustin Pop
    if self.do_node_query:
4257 a8083063 Iustin Pop
      live_data = {}
4258 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
4259 a8083063 Iustin Pop
      for name in nodes:
4260 a8083063 Iustin Pop
        result = node_data[name]
4261 cbfc4681 Iustin Pop
        if result.offline:
4262 cbfc4681 Iustin Pop
          # offline nodes will be in both lists
4263 cbfc4681 Iustin Pop
          off_nodes.append(name)
4264 3cebe102 Michael Hanselmann
        if result.fail_msg:
4265 a8083063 Iustin Pop
          bad_nodes.append(name)
4266 781de953 Iustin Pop
        else:
4267 2fa74ef4 Iustin Pop
          if result.payload:
4268 2fa74ef4 Iustin Pop
            live_data.update(result.payload)
4269 2fa74ef4 Iustin Pop
          # else no instance is alive
4270 a8083063 Iustin Pop
    else:
4271 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
4272 a8083063 Iustin Pop
4273 a8083063 Iustin Pop
    # end data gathering
4274 a8083063 Iustin Pop
4275 5018a335 Iustin Pop
    HVPREFIX = "hv/"
4276 338e51e8 Iustin Pop
    BEPREFIX = "be/"
4277 a8083063 Iustin Pop
    output = []
4278 638c6349 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
4279 a8083063 Iustin Pop
    for instance in instance_list:
4280 a8083063 Iustin Pop
      iout = []
4281 638c6349 Guido Trotter
      i_hv = cluster.FillHV(instance)
4282 638c6349 Guido Trotter
      i_be = cluster.FillBE(instance)
4283 638c6349 Guido Trotter
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4284 638c6349 Guido Trotter
                                 nic.nicparams) for nic in instance.nics]
4285 a8083063 Iustin Pop
      for field in self.op.output_fields:
4286 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
4287 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
4288 19bed813 Iustin Pop
          val = getattr(instance, field)
4289 a8083063 Iustin Pop
        elif field == "pnode":
4290 a8083063 Iustin Pop
          val = instance.primary_node
4291 a8083063 Iustin Pop
        elif field == "snodes":
4292 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
4293 a8083063 Iustin Pop
        elif field == "admin_state":
4294 0d68c45d Iustin Pop
          val = instance.admin_up
4295 a8083063 Iustin Pop
        elif field == "oper_state":
4296 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4297 8a23d2d3 Iustin Pop
            val = None
4298 a8083063 Iustin Pop
          else:
4299 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
4300 d8052456 Iustin Pop
        elif field == "status":
4301 cbfc4681 Iustin Pop
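          # combine node reachability, the hypervisor's view and the admin
          # flag into one of: running, ADMIN_down, ERROR_up, ERROR_down,
          # ERROR_nodedown, ERROR_nodeoffline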
          if instance.primary_node in off_nodes:
4302 cbfc4681 Iustin Pop
            val = "ERROR_nodeoffline"
4303 cbfc4681 Iustin Pop
          elif instance.primary_node in bad_nodes:
4304 d8052456 Iustin Pop
            val = "ERROR_nodedown"
4305 d8052456 Iustin Pop
          else:
4306 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
4307 d8052456 Iustin Pop
            if running:
4308 0d68c45d Iustin Pop
              if instance.admin_up:
4309 d8052456 Iustin Pop
                val = "running"
4310 d8052456 Iustin Pop
              else:
4311 d8052456 Iustin Pop
                val = "ERROR_up"
4312 d8052456 Iustin Pop
            else:
4313 0d68c45d Iustin Pop
              if instance.admin_up:
4314 d8052456 Iustin Pop
                val = "ERROR_down"
4315 d8052456 Iustin Pop
              else:
4316 d8052456 Iustin Pop
                val = "ADMIN_down"
4317 a8083063 Iustin Pop
        elif field == "oper_ram":
4318 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4319 8a23d2d3 Iustin Pop
            val = None
4320 a8083063 Iustin Pop
          elif instance.name in live_data:
4321 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
4322 a8083063 Iustin Pop
          else:
4323 a8083063 Iustin Pop
            val = "-"
4324 c1ce76bb Iustin Pop
        elif field == "vcpus":
4325 c1ce76bb Iustin Pop
          val = i_be[constants.BE_VCPUS]
4326 a8083063 Iustin Pop
        elif field == "disk_template":
4327 a8083063 Iustin Pop
          val = instance.disk_template
4328 a8083063 Iustin Pop
        elif field == "ip":
4329 39a02558 Guido Trotter
          if instance.nics:
4330 39a02558 Guido Trotter
            val = instance.nics[0].ip
4331 39a02558 Guido Trotter
          else:
4332 39a02558 Guido Trotter
            val = None
4333 638c6349 Guido Trotter
        elif field == "nic_mode":
4334 638c6349 Guido Trotter
          if instance.nics:
4335 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_MODE]
4336 638c6349 Guido Trotter
          else:
4337 638c6349 Guido Trotter
            val = None
4338 638c6349 Guido Trotter
        elif field == "nic_link":
4339 39a02558 Guido Trotter
          if instance.nics:
4340 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4341 638c6349 Guido Trotter
          else:
4342 638c6349 Guido Trotter
            val = None
4343 638c6349 Guido Trotter
        elif field == "bridge":
4344 638c6349 Guido Trotter
          if (instance.nics and
4345 638c6349 Guido Trotter
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
4346 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4347 39a02558 Guido Trotter
          else:
4348 39a02558 Guido Trotter
            val = None
4349 a8083063 Iustin Pop
        elif field == "mac":
4350 39a02558 Guido Trotter
          if instance.nics:
4351 39a02558 Guido Trotter
            val = instance.nics[0].mac
4352 39a02558 Guido Trotter
          else:
4353 39a02558 Guido Trotter
            val = None
4354 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
4355 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
4356 ad24e046 Iustin Pop
          try:
4357 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
4358 ad24e046 Iustin Pop
          except errors.OpPrereqError:
4359 8a23d2d3 Iustin Pop
            val = None
4360 024e157f Iustin Pop
        elif field == "disk_usage": # total disk usage per node
4361 024e157f Iustin Pop
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
4362 024e157f Iustin Pop
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
4363 130a6a6f Iustin Pop
        elif field == "tags":
4364 130a6a6f Iustin Pop
          val = list(instance.GetTags())
4365 338e51e8 Iustin Pop
        elif field == "hvparams":
4366 338e51e8 Iustin Pop
          val = i_hv
4367 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
4368 5018a335 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
4369 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
4370 338e51e8 Iustin Pop
        elif field == "beparams":
4371 338e51e8 Iustin Pop
          val = i_be
4372 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
4373 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
4374 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
4375 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
4376 71c1af58 Iustin Pop
          # matches a variable list
4377 71c1af58 Iustin Pop
          st_groups = st_match.groups()
4378 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
4379 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4380 71c1af58 Iustin Pop
              val = len(instance.disks)
4381 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
4382 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
4383 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
4384 3e0cea06 Iustin Pop
              try:
4385 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
4386 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
4387 71c1af58 Iustin Pop
                val = None
4388 71c1af58 Iustin Pop
            else:
4389 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
4390 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
4391 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4392 71c1af58 Iustin Pop
              val = len(instance.nics)
4393 41a776da Iustin Pop
            elif st_groups[1] == "macs":
4394 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
4395 41a776da Iustin Pop
            elif st_groups[1] == "ips":
4396 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
4397 638c6349 Guido Trotter
            elif st_groups[1] == "modes":
4398 638c6349 Guido Trotter
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
4399 638c6349 Guido Trotter
            elif st_groups[1] == "links":
4400 638c6349 Guido Trotter
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
4401 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
4402 638c6349 Guido Trotter
              val = []
4403 638c6349 Guido Trotter
              for nicp in i_nicp:
4404 638c6349 Guido Trotter
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
4405 638c6349 Guido Trotter
                  val.append(nicp[constants.NIC_LINK])
4406 638c6349 Guido Trotter
                else:
4407 638c6349 Guido Trotter
                  val.append(None)
4408 71c1af58 Iustin Pop
            else:
4409 71c1af58 Iustin Pop
              # index-based item
4410 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
4411 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
4412 71c1af58 Iustin Pop
                val = None
4413 71c1af58 Iustin Pop
              else:
4414 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
4415 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
4416 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
4417 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
4418 638c6349 Guido Trotter
                elif st_groups[1] == "mode":
4419 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_MODE]
4420 638c6349 Guido Trotter
                elif st_groups[1] == "link":
4421 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_LINK]
4422 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
4423 638c6349 Guido Trotter
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
4424 638c6349 Guido Trotter
                  if nic_mode == constants.NIC_MODE_BRIDGED:
4425 638c6349 Guido Trotter
                    val = i_nicp[nic_idx][constants.NIC_LINK]
4426 638c6349 Guido Trotter
                  else:
4427 638c6349 Guido Trotter
                    val = None
4428 71c1af58 Iustin Pop
                else:
4429 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
4430 71c1af58 Iustin Pop
          else:
4431 c1ce76bb Iustin Pop
            assert False, ("Declared but unhandled variable parameter '%s'" %
4432 c1ce76bb Iustin Pop
                           field)
4433 a8083063 Iustin Pop
        else:
4434 c1ce76bb Iustin Pop
          assert False, "Declared but unhandled parameter '%s'" % field
4435 a8083063 Iustin Pop
        iout.append(val)
4436 a8083063 Iustin Pop
      output.append(iout)
4437 a8083063 Iustin Pop
4438 a8083063 Iustin Pop
    return output
4439 a8083063 Iustin Pop
4440 a8083063 Iustin Pop
4441 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
4442 a8083063 Iustin Pop
  """Failover an instance.
4443 a8083063 Iustin Pop

4444 a8083063 Iustin Pop
  """
4445 a8083063 Iustin Pop
  HPATH = "instance-failover"
4446 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4447 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
4448 c9e5c064 Guido Trotter
  REQ_BGL = False
4449 c9e5c064 Guido Trotter
4450 17c3f802 Guido Trotter
  def CheckArguments(self):
4451 17c3f802 Guido Trotter
    """Check the arguments.
4452 17c3f802 Guido Trotter

4453 17c3f802 Guido Trotter
    """
4454 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4455 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4456 17c3f802 Guido Trotter
4457 c9e5c064 Guido Trotter
  def ExpandNames(self):
4458 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
4459 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4460 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4461 c9e5c064 Guido Trotter
4462 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
4463 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
4464 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
4465 a8083063 Iustin Pop
4466 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4467 a8083063 Iustin Pop
    """Build hooks env.
4468 a8083063 Iustin Pop

4469 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4470 a8083063 Iustin Pop

4471 a8083063 Iustin Pop
    """
4472 a8083063 Iustin Pop
    env = {
4473 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
4474 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4475 a8083063 Iustin Pop
      }
4476 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4477 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
4478 a8083063 Iustin Pop
    return env, nl, nl
4479 a8083063 Iustin Pop
4480 a8083063 Iustin Pop
  def CheckPrereq(self):
4481 a8083063 Iustin Pop
    """Check prerequisites.
4482 a8083063 Iustin Pop

4483 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4484 a8083063 Iustin Pop

4485 a8083063 Iustin Pop
    """
4486 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4487 c9e5c064 Guido Trotter
    assert self.instance is not None, \
4488 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4489 a8083063 Iustin Pop
4490 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4491 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
4492 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
4493 5c983ee5 Iustin Pop
                                 " network mirrored, cannot failover.",
4494 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
4495 2a710df1 Michael Hanselmann
4496 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
4497 2a710df1 Michael Hanselmann
    if not secondary_nodes:
4498 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
4499 abdf0113 Iustin Pop
                                   "a mirrored disk template")
4500 2a710df1 Michael Hanselmann
4501 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
4502 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, target_node)
4503 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, target_node)
4504 d27776f0 Iustin Pop
    if instance.admin_up:
4505 d27776f0 Iustin Pop
      # check memory requirements on the secondary node
4506 d27776f0 Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
4507 d27776f0 Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
4508 d27776f0 Iustin Pop
                           instance.hypervisor)
4509 d27776f0 Iustin Pop
    else:
4510 d27776f0 Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
4511 d27776f0 Iustin Pop
                   " instance will not be started")
4512 3a7c308e Guido Trotter
4513 a8083063 Iustin Pop
    # check bridge existence
4514 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
4515 a8083063 Iustin Pop
4516 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4517 a8083063 Iustin Pop
    """Failover an instance.
4518 a8083063 Iustin Pop

4519 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
4520 a8083063 Iustin Pop
    starting it on the secondary.
4521 a8083063 Iustin Pop

4522 a8083063 Iustin Pop
    """
4523 a8083063 Iustin Pop
    instance = self.instance
4524 a8083063 Iustin Pop
4525 a8083063 Iustin Pop
    source_node = instance.primary_node
4526 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
4527 a8083063 Iustin Pop
4528 1df79ce6 Michael Hanselmann
    if instance.admin_up:
4529 1df79ce6 Michael Hanselmann
      feedback_fn("* checking disk consistency between source and target")
4530 1df79ce6 Michael Hanselmann
      for dev in instance.disks:
4531 1df79ce6 Michael Hanselmann
        # for drbd, these are drbd over lvm
4532 1df79ce6 Michael Hanselmann
        if not _CheckDiskConsistency(self, dev, target_node, False):
4533 1df79ce6 Michael Hanselmann
          if not self.op.ignore_consistency:
4534 1df79ce6 Michael Hanselmann
            raise errors.OpExecError("Disk %s is degraded on target node,"
4535 1df79ce6 Michael Hanselmann
                                     " aborting failover." % dev.iv_name)
4536 1df79ce6 Michael Hanselmann
    else:
4537 1df79ce6 Michael Hanselmann
      feedback_fn("* not checking disk consistency as instance is not running")
4538 a8083063 Iustin Pop
4539 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
4540 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
4541 9a4f63d1 Iustin Pop
                 instance.name, source_node)
4542 a8083063 Iustin Pop
4543 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
4544 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4545 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4546 1fae010f Iustin Pop
    if msg:
4547 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
4548 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
4549 1fae010f Iustin Pop
                             " Proceeding anyway. Please make sure node"
4550 1fae010f Iustin Pop
                             " %s is down. Error details: %s",
4551 1fae010f Iustin Pop
                             instance.name, source_node, source_node, msg)
4552 24a40d57 Iustin Pop
      else:
4553 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4554 1fae010f Iustin Pop
                                 " node %s: %s" %
4555 1fae010f Iustin Pop
                                 (instance.name, source_node, msg))
4556 a8083063 Iustin Pop
4557 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
4558 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
4559 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
4560 a8083063 Iustin Pop
4561 a8083063 Iustin Pop
    instance.primary_node = target_node
4562 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
4563 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
4564 a8083063 Iustin Pop
4565 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
4566 0d68c45d Iustin Pop
    if instance.admin_up:
4567 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
4568 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
4569 9a4f63d1 Iustin Pop
                   instance.name, target_node)
4570 12a0cfbe Guido Trotter
4571 7c4d6c7b Michael Hanselmann
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
4572 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
4573 12a0cfbe Guido Trotter
      if not disks_ok:
4574 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4575 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
4576 a8083063 Iustin Pop
4577 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
4578 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
4579 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4580 dd279568 Iustin Pop
      if msg:
4581 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4582 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
4583 dd279568 Iustin Pop
                                 (instance.name, target_node, msg))
4584 a8083063 Iustin Pop
4585 a8083063 Iustin Pop
4586 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
4587 53c776b5 Iustin Pop
  """Migrate an instance.
4588 53c776b5 Iustin Pop

4589 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
4590 53c776b5 Iustin Pop
  which is done with shutdown.
4591 53c776b5 Iustin Pop

4592 53c776b5 Iustin Pop
  """
4593 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
4594 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4595 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
4596 53c776b5 Iustin Pop
4597 53c776b5 Iustin Pop
  REQ_BGL = False
4598 53c776b5 Iustin Pop
4599 53c776b5 Iustin Pop
  def ExpandNames(self):
4600 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
4601 3e06e001 Michael Hanselmann
4602 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
4603 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4604 53c776b5 Iustin Pop
4605 3e06e001 Michael Hanselmann
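    # the actual migration work is delegated to the TLMigrateInstance
    # tasklet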
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
4606 3e06e001 Michael Hanselmann
                                       self.op.live, self.op.cleanup)
4607 3a012b41 Michael Hanselmann
    self.tasklets = [self._migrater]
4608 3e06e001 Michael Hanselmann
4609 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
4610 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
4611 53c776b5 Iustin Pop
      self._LockInstancesNodes()
4612 53c776b5 Iustin Pop
4613 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
4614 53c776b5 Iustin Pop
    """Build hooks env.
4615 53c776b5 Iustin Pop

4616 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4617 53c776b5 Iustin Pop

4618 53c776b5 Iustin Pop
    """
4619 3e06e001 Michael Hanselmann
    instance = self._migrater.instance
4620 3e06e001 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self, instance)
4621 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
4622 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
4623 3e06e001 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
4624 53c776b5 Iustin Pop
    return env, nl, nl
4625 53c776b5 Iustin Pop
4626 3e06e001 Michael Hanselmann
4627 313bcead Iustin Pop
class LUMoveInstance(LogicalUnit):
4628 313bcead Iustin Pop
  """Move an instance by data-copying.
4629 313bcead Iustin Pop

4630 313bcead Iustin Pop
  """
4631 313bcead Iustin Pop
  HPATH = "instance-move"
4632 313bcead Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4633 313bcead Iustin Pop
  _OP_REQP = ["instance_name", "target_node"]
4634 313bcead Iustin Pop
  REQ_BGL = False
4635 313bcead Iustin Pop
4636 17c3f802 Guido Trotter
  def CheckArguments(self):
4637 17c3f802 Guido Trotter
    """Check the arguments.
4638 17c3f802 Guido Trotter

4639 17c3f802 Guido Trotter
    """
4640 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4641 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4642 17c3f802 Guido Trotter
4643 313bcead Iustin Pop
  def ExpandNames(self):
4644 313bcead Iustin Pop
    self._ExpandAndLockInstance()
4645 313bcead Iustin Pop
    target_node = self.cfg.ExpandNodeName(self.op.target_node)
4646 313bcead Iustin Pop
    if target_node is None:
4647 313bcead Iustin Pop
      raise errors.OpPrereqError("Node '%s' not known" %
4648 5c983ee5 Iustin Pop
                                  self.op.target_node, errors.ECODE_NOENT)
4649 313bcead Iustin Pop
    self.op.target_node = target_node
4650 313bcead Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
4651 313bcead Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4652 313bcead Iustin Pop
4653 313bcead Iustin Pop
  def DeclareLocks(self, level):
4654 313bcead Iustin Pop
    if level == locking.LEVEL_NODE:
4655 313bcead Iustin Pop
      self._LockInstancesNodes(primary_only=True)
4656 313bcead Iustin Pop
4657 313bcead Iustin Pop
  def BuildHooksEnv(self):
4658 313bcead Iustin Pop
    """Build hooks env.
4659 313bcead Iustin Pop

4660 313bcead Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4661 313bcead Iustin Pop

4662 313bcead Iustin Pop
    """
4663 313bcead Iustin Pop
    env = {
4664 313bcead Iustin Pop
      "TARGET_NODE": self.op.target_node,
4665 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4666 313bcead Iustin Pop
      }
4667 313bcead Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4668 313bcead Iustin Pop
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
4669 313bcead Iustin Pop
                                       self.op.target_node]
4670 313bcead Iustin Pop
    return env, nl, nl
4671 313bcead Iustin Pop
4672 313bcead Iustin Pop
  def CheckPrereq(self):
4673 313bcead Iustin Pop
    """Check prerequisites.
4674 313bcead Iustin Pop

4675 313bcead Iustin Pop
    This checks that the instance is in the cluster.
4676 313bcead Iustin Pop

4677 313bcead Iustin Pop
    """
4678 313bcead Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4679 313bcead Iustin Pop
    assert self.instance is not None, \
4680 313bcead Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
4681 313bcead Iustin Pop
4682 313bcead Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.target_node)
4683 313bcead Iustin Pop
    assert node is not None, \
4684 313bcead Iustin Pop
      "Cannot retrieve locked node %s" % self.op.target_node
4685 313bcead Iustin Pop
4686 313bcead Iustin Pop
    self.target_node = target_node = node.name
4687 313bcead Iustin Pop
4688 313bcead Iustin Pop
    if target_node == instance.primary_node:
4689 313bcead Iustin Pop
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
4690 5c983ee5 Iustin Pop
                                 (instance.name, target_node),
4691 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
4692 313bcead Iustin Pop
4693 313bcead Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4694 313bcead Iustin Pop
4695 313bcead Iustin Pop
    for idx, dsk in enumerate(instance.disks):
4696 313bcead Iustin Pop
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
4697 313bcead Iustin Pop
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
4698 5c983ee5 Iustin Pop
                                   " cannot copy", errors.ECODE_STATE)
4699 313bcead Iustin Pop
4700 313bcead Iustin Pop
    _CheckNodeOnline(self, target_node)
4701 313bcead Iustin Pop
    _CheckNodeNotDrained(self, target_node)
4702 313bcead Iustin Pop
4703 313bcead Iustin Pop
    if instance.admin_up:
4704 313bcead Iustin Pop
      # check memory requirements on the target node
4705 313bcead Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
4706 313bcead Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
4707 313bcead Iustin Pop
                           instance.hypervisor)
4708 313bcead Iustin Pop
    else:
4709 313bcead Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
4710 313bcead Iustin Pop
                   " instance will not be started")
4711 313bcead Iustin Pop
4712 313bcead Iustin Pop
    # check bridge existence
4713 313bcead Iustin Pop
    _CheckInstanceBridgesExist(self, instance, node=target_node)
4714 313bcead Iustin Pop
4715 313bcead Iustin Pop
  def Exec(self, feedback_fn):
4716 313bcead Iustin Pop
    """Move an instance.
4717 313bcead Iustin Pop

4718 313bcead Iustin Pop
    The move is done by shutting it down on its present node, copying
4719 313bcead Iustin Pop
    the data over (slow) and starting it on the new node.
4720 313bcead Iustin Pop

4721 313bcead Iustin Pop
    """
4722 313bcead Iustin Pop
    instance = self.instance
4723 313bcead Iustin Pop
4724 313bcead Iustin Pop
    source_node = instance.primary_node
4725 313bcead Iustin Pop
    target_node = self.target_node
4726 313bcead Iustin Pop
4727 313bcead Iustin Pop
    self.LogInfo("Shutting down instance %s on source node %s",
4728 313bcead Iustin Pop
                 instance.name, source_node)
4729 313bcead Iustin Pop
4730 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
4731 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4732 313bcead Iustin Pop
    msg = result.fail_msg
4733 313bcead Iustin Pop
    if msg:
4734 313bcead Iustin Pop
      if self.ignore_consistency:
4735 313bcead Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
4736 313bcead Iustin Pop
                             " Proceeding anyway. Please make sure node"
4737 313bcead Iustin Pop
                             " %s is down. Error details: %s",
4738 313bcead Iustin Pop
                             instance.name, source_node, source_node, msg)
4739 313bcead Iustin Pop
      else:
4740 313bcead Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4741 313bcead Iustin Pop
                                 " node %s: %s" %
4742 313bcead Iustin Pop
                                 (instance.name, source_node, msg))
4743 313bcead Iustin Pop
4744 313bcead Iustin Pop
    # create the target disks
4745 313bcead Iustin Pop
    try:
4746 313bcead Iustin Pop
      _CreateDisks(self, instance, target_node=target_node)
4747 313bcead Iustin Pop
    except errors.OpExecError:
4748 313bcead Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
4749 313bcead Iustin Pop
      try:
4750 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
4751 313bcead Iustin Pop
      finally:
4752 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4753 313bcead Iustin Pop
        raise
4754 313bcead Iustin Pop
4755 313bcead Iustin Pop
    cluster_name = self.cfg.GetClusterInfo().cluster_name
4756 313bcead Iustin Pop
4757 313bcead Iustin Pop
    errs = []
4758 313bcead Iustin Pop
    # activate, get path, copy the data over
4759 313bcead Iustin Pop
    for idx, disk in enumerate(instance.disks):
4760 313bcead Iustin Pop
      self.LogInfo("Copying data for disk %d", idx)
4761 313bcead Iustin Pop
      result = self.rpc.call_blockdev_assemble(target_node, disk,
4762 313bcead Iustin Pop
                                               instance.name, True)
4763 313bcead Iustin Pop
      if result.fail_msg:
4764 313bcead Iustin Pop
        self.LogWarning("Can't assemble newly created disk %d: %s",
4765 313bcead Iustin Pop
                        idx, result.fail_msg)
4766 313bcead Iustin Pop
        errs.append(result.fail_msg)
4767 313bcead Iustin Pop
        break
4768 313bcead Iustin Pop
      dev_path = result.payload
4769 313bcead Iustin Pop
      result = self.rpc.call_blockdev_export(source_node, disk,
4770 313bcead Iustin Pop
                                             target_node, dev_path,
4771 313bcead Iustin Pop
                                             cluster_name)
4772 313bcead Iustin Pop
      if result.fail_msg:
4773 313bcead Iustin Pop
        self.LogWarning("Can't copy data over for disk %d: %s",
4774 313bcead Iustin Pop
                        idx, result.fail_msg)
4775 313bcead Iustin Pop
        errs.append(result.fail_msg)
4776 313bcead Iustin Pop
        break
4777 313bcead Iustin Pop
4778 313bcead Iustin Pop
    if errs:
4779 313bcead Iustin Pop
      self.LogWarning("Some disks failed to copy, aborting")
4780 313bcead Iustin Pop
      try:
4781 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
4782 313bcead Iustin Pop
      finally:
4783 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4784 313bcead Iustin Pop
        raise errors.OpExecError("Errors during disk copy: %s" %
4785 313bcead Iustin Pop
                                 (",".join(errs),))
4786 313bcead Iustin Pop
4787 313bcead Iustin Pop
    instance.primary_node = target_node
4788 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
4789 313bcead Iustin Pop
4790 313bcead Iustin Pop
    self.LogInfo("Removing the disks on the original node")
4791 313bcead Iustin Pop
    _RemoveDisks(self, instance, target_node=source_node)
4792 313bcead Iustin Pop
4793 313bcead Iustin Pop
    # Only start the instance if it's marked as up
4794 313bcead Iustin Pop
    if instance.admin_up:
4795 313bcead Iustin Pop
      self.LogInfo("Starting instance %s on node %s",
4796 313bcead Iustin Pop
                   instance.name, target_node)
4797 313bcead Iustin Pop
4798 313bcead Iustin Pop
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
4799 313bcead Iustin Pop
                                           ignore_secondaries=True)
4800 313bcead Iustin Pop
      if not disks_ok:
4801 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4802 313bcead Iustin Pop
        raise errors.OpExecError("Can't activate the instance's disks")
4803 313bcead Iustin Pop
4804 313bcead Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
4805 313bcead Iustin Pop
      msg = result.fail_msg
4806 313bcead Iustin Pop
      if msg:
4807 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4808 313bcead Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
4809 313bcead Iustin Pop
                                 (instance.name, target_node, msg))
4810 313bcead Iustin Pop
4811 313bcead Iustin Pop
4812 80cb875c Michael Hanselmann
class LUMigrateNode(LogicalUnit):
4813 80cb875c Michael Hanselmann
  """Migrate all instances from a node.
4814 80cb875c Michael Hanselmann

4815 80cb875c Michael Hanselmann
  """
4816 80cb875c Michael Hanselmann
  HPATH = "node-migrate"
4817 80cb875c Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
4818 80cb875c Michael Hanselmann
  _OP_REQP = ["node_name", "live"]
4819 80cb875c Michael Hanselmann
  REQ_BGL = False
4820 80cb875c Michael Hanselmann
4821 80cb875c Michael Hanselmann
  def ExpandNames(self):
4822 80cb875c Michael Hanselmann
    self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name)
4823 80cb875c Michael Hanselmann
    if self.op.node_name is None:
4824 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name,
4825 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
4826 80cb875c Michael Hanselmann
4827 80cb875c Michael Hanselmann
    self.needed_locks = {
4828 80cb875c Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
4829 80cb875c Michael Hanselmann
      }
4830 80cb875c Michael Hanselmann
4831 80cb875c Michael Hanselmann
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4832 80cb875c Michael Hanselmann
4833 80cb875c Michael Hanselmann
    # Create tasklets for migrating all primary instances of this node
4834 80cb875c Michael Hanselmann
    names = []
4835 80cb875c Michael Hanselmann
    tasklets = []
4836 80cb875c Michael Hanselmann
4837 80cb875c Michael Hanselmann
    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
4838 80cb875c Michael Hanselmann
      logging.debug("Migrating instance %s", inst.name)
4839 80cb875c Michael Hanselmann
      names.append(inst.name)
4840 80cb875c Michael Hanselmann
4841 80cb875c Michael Hanselmann
      tasklets.append(TLMigrateInstance(self, inst.name, self.op.live, False))
4842 80cb875c Michael Hanselmann
4843 80cb875c Michael Hanselmann
    self.tasklets = tasklets
4844 80cb875c Michael Hanselmann
4845 80cb875c Michael Hanselmann
    # Declare instance locks
4846 80cb875c Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = names
4847 80cb875c Michael Hanselmann
4848 80cb875c Michael Hanselmann
  def DeclareLocks(self, level):
4849 80cb875c Michael Hanselmann
    if level == locking.LEVEL_NODE:
4850 80cb875c Michael Hanselmann
      self._LockInstancesNodes()
4851 80cb875c Michael Hanselmann
4852 80cb875c Michael Hanselmann
  def BuildHooksEnv(self):
4853 80cb875c Michael Hanselmann
    """Build hooks env.
4854 80cb875c Michael Hanselmann

4855 80cb875c Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
4856 80cb875c Michael Hanselmann

4857 80cb875c Michael Hanselmann
    """
4858 80cb875c Michael Hanselmann
    env = {
4859 80cb875c Michael Hanselmann
      "NODE_NAME": self.op.node_name,
4860 80cb875c Michael Hanselmann
      }
4861 80cb875c Michael Hanselmann
4862 80cb875c Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
4863 80cb875c Michael Hanselmann
4864 80cb875c Michael Hanselmann
    return (env, nl, nl)
4865 80cb875c Michael Hanselmann
4866 80cb875c Michael Hanselmann
4867 3e06e001 Michael Hanselmann
class TLMigrateInstance(Tasklet):
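  """Tasklet class for instance migration.

  This tasklet is used both by LUMigrateInstance (to migrate a single
  instance) and by LUMigrateNode (to migrate all primary instances of a
  node).

  """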
4868 3e06e001 Michael Hanselmann
  def __init__(self, lu, instance_name, live, cleanup):
4869 3e06e001 Michael Hanselmann
    """Initializes this class.
4870 3e06e001 Michael Hanselmann

4871 3e06e001 Michael Hanselmann
    """
4872 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
4873 464243a7 Michael Hanselmann
4874 3e06e001 Michael Hanselmann
    # Parameters
4875 3e06e001 Michael Hanselmann
    self.instance_name = instance_name
4876 3e06e001 Michael Hanselmann
    self.live = live
4877 3e06e001 Michael Hanselmann
    self.cleanup = cleanup
4878 3e06e001 Michael Hanselmann
4879 53c776b5 Iustin Pop
  def CheckPrereq(self):
4880 53c776b5 Iustin Pop
    """Check prerequisites.
4881 53c776b5 Iustin Pop

4882 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
4883 53c776b5 Iustin Pop

4884 53c776b5 Iustin Pop
    """
4885 53c776b5 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
4886 3e06e001 Michael Hanselmann
      self.cfg.ExpandInstanceName(self.instance_name))
4887 53c776b5 Iustin Pop
    if instance is None:
4888 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
4889 5c983ee5 Iustin Pop
                                 self.instance_name, errors.ECODE_NOENT)
4890 53c776b5 Iustin Pop
4891 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
4892 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
4893 5c983ee5 Iustin Pop
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)
4894 53c776b5 Iustin Pop
4895 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
4896 53c776b5 Iustin Pop
    if not secondary_nodes:
4897 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
4898 733a2b6a Iustin Pop
                                      " drbd8 disk template")
4899 53c776b5 Iustin Pop
4900 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
4901 53c776b5 Iustin Pop
4902 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
4903 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
4904 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
4905 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
4906 53c776b5 Iustin Pop
                         instance.hypervisor)
4907 53c776b5 Iustin Pop
4908 53c776b5 Iustin Pop
    # check bridge existence
4909 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
4910 53c776b5 Iustin Pop
4911 3e06e001 Michael Hanselmann
    if not self.cleanup:
4912 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
4913 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
4914 53c776b5 Iustin Pop
                                                 instance)
4915 045dd6d9 Iustin Pop
      result.Raise("Can't migrate, please use failover",
4916 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_STATE)
4917 53c776b5 Iustin Pop
4918 53c776b5 Iustin Pop
    self.instance = instance
4919 53c776b5 Iustin Pop
4920 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
4921 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
4922 53c776b5 Iustin Pop

4923 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
4924 53c776b5 Iustin Pop

4925 53c776b5 Iustin Pop
    """
4926 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
4927 53c776b5 Iustin Pop
    all_done = False
4928 53c776b5 Iustin Pop
    while not all_done:
4929 53c776b5 Iustin Pop
      all_done = True
4930 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
4931 53c776b5 Iustin Pop
                                            self.nodes_ip,
4932 53c776b5 Iustin Pop
                                            self.instance.disks)
4933 53c776b5 Iustin Pop
      min_percent = 100
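      # min_percent tracks the least advanced node; this is what gets
      # reported back as the overall sync progress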
4934 53c776b5 Iustin Pop
      for node, nres in result.items():
4935 4c4e4e1e Iustin Pop
        nres.Raise("Cannot resync disks on node %s" % node)
4936 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
4937 53c776b5 Iustin Pop
        all_done = all_done and node_done
4938 53c776b5 Iustin Pop
        if node_percent is not None:
4939 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
4940 53c776b5 Iustin Pop
      if not all_done:
4941 53c776b5 Iustin Pop
        if min_percent < 100:
4942 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
4943 53c776b5 Iustin Pop
        time.sleep(2)
4944 53c776b5 Iustin Pop
4945 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
4946 53c776b5 Iustin Pop
    """Demote a node to secondary.
4947 53c776b5 Iustin Pop

4948 53c776b5 Iustin Pop
    """
4949 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
4950 53c776b5 Iustin Pop
4951 53c776b5 Iustin Pop
    for dev in self.instance.disks:
4952 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
4953 53c776b5 Iustin Pop
4954 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
4955 53c776b5 Iustin Pop
                                          self.instance.disks)
4956 4c4e4e1e Iustin Pop
    result.Raise("Cannot change disk to secondary on node %s" % node)
4957 53c776b5 Iustin Pop
4958 53c776b5 Iustin Pop
  def _GoStandalone(self):
4959 53c776b5 Iustin Pop
    """Disconnect from the network.
4960 53c776b5 Iustin Pop

4961 53c776b5 Iustin Pop
    """
4962 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
4963 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
4964 53c776b5 Iustin Pop
                                               self.instance.disks)
4965 53c776b5 Iustin Pop
    for node, nres in result.items():
4966 4c4e4e1e Iustin Pop
      nres.Raise("Cannot disconnect disks node %s" % node)
4967 53c776b5 Iustin Pop
4968 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
4969 53c776b5 Iustin Pop
    """Reconnect to the network.
4970 53c776b5 Iustin Pop

4971 53c776b5 Iustin Pop
    """
4972 53c776b5 Iustin Pop
    if multimaster:
4973 53c776b5 Iustin Pop
      msg = "dual-master"
4974 53c776b5 Iustin Pop
    else:
4975 53c776b5 Iustin Pop
      msg = "single-master"
4976 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
4977 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
4978 53c776b5 Iustin Pop
                                           self.instance.disks,
4979 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
4980 53c776b5 Iustin Pop
    for node, nres in result.items():
4981 4c4e4e1e Iustin Pop
      nres.Raise("Cannot change disks config on node %s" % node)
4982 53c776b5 Iustin Pop
4983 53c776b5 Iustin Pop
  def _ExecCleanup(self):
4984 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
4985 53c776b5 Iustin Pop

4986 53c776b5 Iustin Pop
    The cleanup is done by:
4987 53c776b5 Iustin Pop
      - check that the instance is running only on one node
4988 53c776b5 Iustin Pop
        (and update the config if needed)
4989 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
4990 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
4991 53c776b5 Iustin Pop
      - disconnect from the network
4992 53c776b5 Iustin Pop
      - change disks into single-master mode
4993 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
4994 53c776b5 Iustin Pop

4995 53c776b5 Iustin Pop
    """
4996 53c776b5 Iustin Pop
    instance = self.instance
4997 53c776b5 Iustin Pop
    target_node = self.target_node
4998 53c776b5 Iustin Pop
    source_node = self.source_node
4999 53c776b5 Iustin Pop
5000 53c776b5 Iustin Pop
    # check running on only one node
5001 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
5002 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
5003 53c776b5 Iustin Pop
                     " a bad state)")
5004 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
5005 53c776b5 Iustin Pop
    for node, result in ins_l.items():
5006 4c4e4e1e Iustin Pop
      result.Raise("Can't contact node %s" % node)
5007 53c776b5 Iustin Pop
5008 aca13712 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].payload
5009 aca13712 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].payload
5010 53c776b5 Iustin Pop
5011 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
5012 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
5013 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
5014 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
5015 53c776b5 Iustin Pop
                               " and restart this operation.")
5016 53c776b5 Iustin Pop
5017 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
5018 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
5019 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
5020 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
5021 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
5022 53c776b5 Iustin Pop
5023 53c776b5 Iustin Pop
    if runningon_target:
5024 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
5025 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
5026 53c776b5 Iustin Pop
                       " updating config" % target_node)
5027 53c776b5 Iustin Pop
      instance.primary_node = target_node
5028 a4eae71f Michael Hanselmann
      self.cfg.Update(instance, self.feedback_fn)
5029 53c776b5 Iustin Pop
      demoted_node = source_node
5030 53c776b5 Iustin Pop
    else:
5031 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
5032 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
5033 53c776b5 Iustin Pop
      demoted_node = target_node
5034 53c776b5 Iustin Pop
5035 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
5036 53c776b5 Iustin Pop
    try:
5037 53c776b5 Iustin Pop
      self._WaitUntilSync()
5038 53c776b5 Iustin Pop
    except errors.OpExecError:
5039 53c776b5 Iustin Pop
      # we ignore errors here, since if the device is standalone, it
5040 53c776b5 Iustin Pop
      # won't be able to sync
5041 53c776b5 Iustin Pop
      pass
5042 53c776b5 Iustin Pop
    self._GoStandalone()
5043 53c776b5 Iustin Pop
    self._GoReconnect(False)
5044 53c776b5 Iustin Pop
    self._WaitUntilSync()
5045 53c776b5 Iustin Pop
5046 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5047 53c776b5 Iustin Pop
5048 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
5049 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
5050 6906a9d8 Guido Trotter

5051 6906a9d8 Guido Trotter
    """
5052 6906a9d8 Guido Trotter
    target_node = self.target_node
5053 6906a9d8 Guido Trotter
    try:
5054 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
5055 6906a9d8 Guido Trotter
      self._GoStandalone()
5056 6906a9d8 Guido Trotter
      self._GoReconnect(False)
5057 6906a9d8 Guido Trotter
      self._WaitUntilSync()
5058 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
5059 3e06e001 Michael Hanselmann
      self.lu.LogWarning("Migration failed and I can't reconnect the"
5060 3e06e001 Michael Hanselmann
                         " drives: error '%s'\n"
5061 3e06e001 Michael Hanselmann
                         "Please look and recover the instance status" %
5062 3e06e001 Michael Hanselmann
                         str(err))
5063 6906a9d8 Guido Trotter
5064 6906a9d8 Guido Trotter
  def _AbortMigration(self):
5065 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
5066 6906a9d8 Guido Trotter

5067 6906a9d8 Guido Trotter
    """
5068 6906a9d8 Guido Trotter
    instance = self.instance
5069 6906a9d8 Guido Trotter
    target_node = self.target_node
5070 6906a9d8 Guido Trotter
    migration_info = self.migration_info
5071 6906a9d8 Guido Trotter
5072 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
5073 6906a9d8 Guido Trotter
                                                    instance,
5074 6906a9d8 Guido Trotter
                                                    migration_info,
5075 6906a9d8 Guido Trotter
                                                    False)
5076 4c4e4e1e Iustin Pop
    abort_msg = abort_result.fail_msg
5077 6906a9d8 Guido Trotter
    if abort_msg:
5078 6906a9d8 Guido Trotter
      logging.error("Aborting migration failed on target node %s: %s" %
5079 6906a9d8 Guido Trotter
                    (target_node, abort_msg))
5080 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we still have to try to revert the
5081 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
5082 6906a9d8 Guido Trotter
5083 53c776b5 Iustin Pop
  def _ExecMigration(self):
5084 53c776b5 Iustin Pop
    """Migrate an instance.
5085 53c776b5 Iustin Pop

5086 53c776b5 Iustin Pop
    The migrate is done by:
5087 53c776b5 Iustin Pop
      - change the disks into dual-master mode
5088 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
5089 53c776b5 Iustin Pop
      - migrate the instance
5090 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
5091 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
5092 53c776b5 Iustin Pop
      - change disks into single-master mode
5093 53c776b5 Iustin Pop

5094 53c776b5 Iustin Pop
    """
5095 53c776b5 Iustin Pop
    instance = self.instance
5096 53c776b5 Iustin Pop
    target_node = self.target_node
5097 53c776b5 Iustin Pop
    source_node = self.source_node
5098 53c776b5 Iustin Pop
5099 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
5100 53c776b5 Iustin Pop
    for dev in instance.disks:
5101 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
5102 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
5103 53c776b5 Iustin Pop
                                 " synchronized on target node,"
5104 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
5105 53c776b5 Iustin Pop
5106 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
5107 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
5108 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5109 6906a9d8 Guido Trotter
    if msg:
5110 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
5111 0959c824 Iustin Pop
                 (source_node, msg))
5112 6906a9d8 Guido Trotter
      logging.error(log_err)
5113 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
5114 6906a9d8 Guido Trotter
5115 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
5116 6906a9d8 Guido Trotter
5117 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
5118 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
5119 53c776b5 Iustin Pop
    self._GoStandalone()
5120 53c776b5 Iustin Pop
    self._GoReconnect(True)
5121 53c776b5 Iustin Pop
    self._WaitUntilSync()
5122 53c776b5 Iustin Pop
5123 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
5124 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
5125 6906a9d8 Guido Trotter
                                           instance,
5126 6906a9d8 Guido Trotter
                                           migration_info,
5127 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
5128 6906a9d8 Guido Trotter
5129 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5130 6906a9d8 Guido Trotter
    if msg:
5131 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
5132 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
5133 6906a9d8 Guido Trotter
      self._AbortMigration()
5134 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5135 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
5136 6906a9d8 Guido Trotter
                               (instance.name, msg))
5137 6906a9d8 Guido Trotter
5138 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
5139 53c776b5 Iustin Pop
    time.sleep(10)
5140 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
5141 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
5142 3e06e001 Michael Hanselmann
                                            self.live)
5143 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5144 53c776b5 Iustin Pop
    if msg:
5145 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
5146 53c776b5 Iustin Pop
                    " disk status: %s", msg)
5147 6906a9d8 Guido Trotter
      self._AbortMigration()
5148 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5149 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
5150 53c776b5 Iustin Pop
                               (instance.name, msg))
5151 53c776b5 Iustin Pop
    time.sleep(10)
5152 53c776b5 Iustin Pop
5153 53c776b5 Iustin Pop
    instance.primary_node = target_node
5154 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
5155 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, self.feedback_fn)
5156 53c776b5 Iustin Pop
5157 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
5158 6906a9d8 Guido Trotter
                                              instance,
5159 6906a9d8 Guido Trotter
                                              migration_info,
5160 6906a9d8 Guido Trotter
                                              True)
5161 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5162 6906a9d8 Guido Trotter
    if msg:
5163 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
5164 6906a9d8 Guido Trotter
                    " %s" % msg)
5165 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
5166 6906a9d8 Guido Trotter
                               msg)
5167 6906a9d8 Guido Trotter
5168 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
5169 53c776b5 Iustin Pop
    self._WaitUntilSync()
5170 53c776b5 Iustin Pop
    self._GoStandalone()
5171 53c776b5 Iustin Pop
    self._GoReconnect(False)
5172 53c776b5 Iustin Pop
    self._WaitUntilSync()
5173 53c776b5 Iustin Pop
5174 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5175 53c776b5 Iustin Pop
5176 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
5177 53c776b5 Iustin Pop
    """Perform the migration.
5178 53c776b5 Iustin Pop

5179 53c776b5 Iustin Pop
    """
5180 80cb875c Michael Hanselmann
    feedback_fn("Migrating instance %s" % self.instance.name)
5181 80cb875c Michael Hanselmann
5182 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
5183 53c776b5 Iustin Pop
5184 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
5185 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
5186 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
5187 53c776b5 Iustin Pop
    self.nodes_ip = {
5188 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
5189 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
5190 53c776b5 Iustin Pop
      }
5191 3e06e001 Michael Hanselmann
5192 3e06e001 Michael Hanselmann
    if self.cleanup:
5193 53c776b5 Iustin Pop
      return self._ExecCleanup()
5194 53c776b5 Iustin Pop
    else:
5195 53c776b5 Iustin Pop
      return self._ExecMigration()
5196 53c776b5 Iustin Pop
5197 53c776b5 Iustin Pop
5198 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
5199 428958aa Iustin Pop
                    info, force_open):
5200 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
5201 a8083063 Iustin Pop

5202 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
5203 a8083063 Iustin Pop
  all its children.
5204 a8083063 Iustin Pop

5205 a8083063 Iustin Pop
  If not, just recurse into its children, keeping the same 'force' value.
5206 a8083063 Iustin Pop

5207 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
5208 428958aa Iustin Pop
  @param node: the node on which to create the device
5209 428958aa Iustin Pop
  @type instance: L{objects.Instance}
5210 428958aa Iustin Pop
  @param instance: the instance which owns the device
5211 428958aa Iustin Pop
  @type device: L{objects.Disk}
5212 428958aa Iustin Pop
  @param device: the device to create
5213 428958aa Iustin Pop
  @type force_create: boolean
5214 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
5215 428958aa Iustin Pop
      will be changed to True whenever we find a device for which
5216 428958aa Iustin Pop
      CreateOnSecondary() returns True
5217 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5218 428958aa Iustin Pop
      (this will be represented as a LVM tag)
5219 428958aa Iustin Pop
  @type force_open: boolean
5220 428958aa Iustin Pop
  @param force_open: this parameter will be passed to the
5221 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5222 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
5223 428958aa Iustin Pop
      the child assembly and the device own Open() execution
5224 428958aa Iustin Pop

5225 a8083063 Iustin Pop
  """
5226 a8083063 Iustin Pop
  if device.CreateOnSecondary():
5227 428958aa Iustin Pop
    force_create = True
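    # from this point downwards the whole subtree is created, since a
    # device living on the secondary node needs all of its children there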
5228 796cab27 Iustin Pop
5229 a8083063 Iustin Pop
  if device.children:
5230 a8083063 Iustin Pop
    for child in device.children:
5231 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
5232 428958aa Iustin Pop
                      info, force_open)
5233 a8083063 Iustin Pop
5234 428958aa Iustin Pop
  if not force_create:
5235 796cab27 Iustin Pop
    return
5236 796cab27 Iustin Pop
5237 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
5238 de12473a Iustin Pop
5239 de12473a Iustin Pop
5240 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
5241 de12473a Iustin Pop
  """Create a single block device on a given node.
5242 de12473a Iustin Pop

5243 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
5244 de12473a Iustin Pop
  created in advance.
5245 de12473a Iustin Pop

5246 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
5247 de12473a Iustin Pop
  @param node: the node on which to create the device
5248 de12473a Iustin Pop
  @type instance: L{objects.Instance}
5249 de12473a Iustin Pop
  @param instance: the instance which owns the device
5250 de12473a Iustin Pop
  @type device: L{objects.Disk}
5251 de12473a Iustin Pop
  @param device: the device to create
5252 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5253 de12473a Iustin Pop
      (this will be represented as a LVM tag)
5254 de12473a Iustin Pop
  @type force_open: boolean
5255 de12473a Iustin Pop
  @param force_open: this parameter will be passes to the
5256 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5257 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
5258 de12473a Iustin Pop
      the child assembly and the device own Open() execution
5259 de12473a Iustin Pop

5260 de12473a Iustin Pop
  """
5261 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
5262 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
5263 428958aa Iustin Pop
                                       instance.name, force_open, info)
5264 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
5265 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
5266 a8083063 Iustin Pop
  if device.physical_id is None:
5267 0959c824 Iustin Pop
    device.physical_id = result.payload
5268 a8083063 Iustin Pop
5269 a8083063 Iustin Pop
5270 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
5271 923b1523 Iustin Pop
  """Generate a suitable LV name.
5272 923b1523 Iustin Pop

5273 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
5274 923b1523 Iustin Pop

5275 923b1523 Iustin Pop
  """
5276 923b1523 Iustin Pop
  results = []
5277 923b1523 Iustin Pop
  for val in exts:
5278 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
5279 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
5280 923b1523 Iustin Pop
  return results
5281 923b1523 Iustin Pop
5282 923b1523 Iustin Pop
5283 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
5284 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
5285 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
5286 a1f445d3 Iustin Pop

5287 a1f445d3 Iustin Pop
  """
5288 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
5289 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5290 b9bddb6b Iustin Pop
  shared_secret = lu.cfg.GenerateDRBDSecret()
5291 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5292 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
5293 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5294 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
5295 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
5296 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
5297 f9518d38 Iustin Pop
                                      p_minor, s_minor,
5298 f9518d38 Iustin Pop
                                      shared_secret),
5299 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
5300 a1f445d3 Iustin Pop
                          iv_name=iv_name)
5301 a1f445d3 Iustin Pop
  return drbd_dev
5302 a1f445d3 Iustin Pop
5303 7c0d6283 Michael Hanselmann
5304 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
5305 a8083063 Iustin Pop
                          instance_name, primary_node,
5306 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
5307 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
5308 e2a65344 Iustin Pop
                          base_index):
5309 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
5310 a8083063 Iustin Pop

5311 a8083063 Iustin Pop
  """
5312 a8083063 Iustin Pop
  #TODO: compute space requirements
5313 a8083063 Iustin Pop
5314 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5315 08db7c5c Iustin Pop
  disk_count = len(disk_info)
5316 08db7c5c Iustin Pop
  disks = []
5317 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
5318 08db7c5c Iustin Pop
    pass
5319 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
5320 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
5321 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5322 923b1523 Iustin Pop
5323 fb4b324b Guido Trotter
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5324 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
5325 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5326 e2a65344 Iustin Pop
      disk_index = idx + base_index
5327 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
5328 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
5329 6ec66eae Iustin Pop
                              iv_name="disk/%d" % disk_index,
5330 6ec66eae Iustin Pop
                              mode=disk["mode"])
5331 08db7c5c Iustin Pop
      disks.append(disk_dev)
5332 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
5333 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
5334 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5335 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
5336 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
5337 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
5338 08db7c5c Iustin Pop
5339 e6c1ff2f Iustin Pop
    names = []
5340 fb4b324b Guido Trotter
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5341 e6c1ff2f Iustin Pop
                                               for i in range(disk_count)]):
5342 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_data")
5343 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_meta")
5344 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5345 112050d9 Iustin Pop
      disk_index = idx + base_index
5346 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
5347 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
5348 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
5349 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
5350 6ec66eae Iustin Pop
      disk_dev.mode = disk["mode"]
5351 08db7c5c Iustin Pop
      disks.append(disk_dev)
5352 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
5353 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
5354 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
5355 0f1a06e3 Manuel Franceschini
5356 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5357 112050d9 Iustin Pop
      disk_index = idx + base_index
5358 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
5359 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
5360 08db7c5c Iustin Pop
                              logical_id=(file_driver,
5361 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
5362 43e99cff Guido Trotter
                                                         disk_index)),
5363 6ec66eae Iustin Pop
                              mode=disk["mode"])
5364 08db7c5c Iustin Pop
      disks.append(disk_dev)
5365 a8083063 Iustin Pop
  else:
5366 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
5367 a8083063 Iustin Pop
  return disks
5368 a8083063 Iustin Pop
5369 a8083063 Iustin Pop
5370 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
5371 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
5372 3ecf6786 Iustin Pop

5373 3ecf6786 Iustin Pop
  """
5374 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
5375 a0c3fea1 Michael Hanselmann
5376 a0c3fea1 Michael Hanselmann
5377 621b7678 Iustin Pop
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
5378 a8083063 Iustin Pop
  """Create all disks for an instance.
5379 a8083063 Iustin Pop

5380 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
5381 a8083063 Iustin Pop

5382 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5383 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5384 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5385 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
5386 bd315bfa Iustin Pop
  @type to_skip: list
5387 bd315bfa Iustin Pop
  @param to_skip: list of indices to skip
5388 621b7678 Iustin Pop
  @type target_node: string
5389 621b7678 Iustin Pop
  @param target_node: if passed, overrides the target node for creation
5390 e4376078 Iustin Pop
  @raise errors.OpExecError: in case of an error during the creation
5392 a8083063 Iustin Pop

5393 a8083063 Iustin Pop
  """
5394 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
5395 621b7678 Iustin Pop
  if target_node is None:
5396 621b7678 Iustin Pop
    pnode = instance.primary_node
5397 621b7678 Iustin Pop
    all_nodes = instance.all_nodes
5398 621b7678 Iustin Pop
  else:
5399 621b7678 Iustin Pop
    pnode = target_node
5400 621b7678 Iustin Pop
    all_nodes = [pnode]
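  # when target_node is passed (as done e.g. by LUMoveInstance), the disks
  # are created only on that node instead of on the instance's usual nodes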
5401 a0c3fea1 Michael Hanselmann
5402 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5403 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5404 428958aa Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
5405 0f1a06e3 Manuel Franceschini
5406 4c4e4e1e Iustin Pop
    result.Raise("Failed to create directory '%s' on"
5407 9b4127eb Guido Trotter
                 " node %s" % (file_storage_dir, pnode))
5408 0f1a06e3 Manuel Franceschini
5409 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
5410 24991749 Iustin Pop
  # LUSetInstanceParams
5411 bd315bfa Iustin Pop
  for idx, device in enumerate(instance.disks):
5412 bd315bfa Iustin Pop
    if to_skip and idx in to_skip:
5413 bd315bfa Iustin Pop
      continue
5414 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
5415 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
5416 a8083063 Iustin Pop
    #HARDCODE
5417 621b7678 Iustin Pop
    for node in all_nodes:
5418 428958aa Iustin Pop
      f_create = node == pnode
5419 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
5420 a8083063 Iustin Pop
5421 a8083063 Iustin Pop
5422 621b7678 Iustin Pop
def _RemoveDisks(lu, instance, target_node=None):
5423 a8083063 Iustin Pop
  """Remove all disks for an instance.
5424 a8083063 Iustin Pop

5425 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
5426 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
5427 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
5428 a8083063 Iustin Pop
  with `_CreateDisks()`).
5429 a8083063 Iustin Pop

5430 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5431 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5432 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5433 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
5434 621b7678 Iustin Pop
  @type target_node: string
5435 621b7678 Iustin Pop
  @param target_node: used to override the node on which to remove the disks
5436 e4376078 Iustin Pop
  @rtype: boolean
5437 e4376078 Iustin Pop
  @return: the success of the removal
5438 a8083063 Iustin Pop

5439 a8083063 Iustin Pop
  """
5440 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
5441 a8083063 Iustin Pop
5442 e1bc0878 Iustin Pop
  all_result = True
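  # all_result stays True only if every block device (and, for file-based
  # instances, the file storage directory) could be removed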
5443 a8083063 Iustin Pop
  for device in instance.disks:
5444 621b7678 Iustin Pop
    if target_node:
5445 621b7678 Iustin Pop
      edata = [(target_node, device)]
5446 621b7678 Iustin Pop
    else:
5447 621b7678 Iustin Pop
      edata = device.ComputeNodeTree(instance.primary_node)
5448 621b7678 Iustin Pop
    for node, disk in edata:
5449 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
5450 4c4e4e1e Iustin Pop
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
5451 e1bc0878 Iustin Pop
      if msg:
5452 e1bc0878 Iustin Pop
        lu.LogWarning("Could not remove block device %s on node %s,"
5453 e1bc0878 Iustin Pop
                      " continuing anyway: %s", device.iv_name, node, msg)
5454 e1bc0878 Iustin Pop
        all_result = False
5455 0f1a06e3 Manuel Franceschini
5456 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5457 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5458 dfc2a24c Guido Trotter
    if target_node:
5459 dfc2a24c Guido Trotter
      tgt = target_node
5460 621b7678 Iustin Pop
    else:
5461 dfc2a24c Guido Trotter
      tgt = instance.primary_node
5462 621b7678 Iustin Pop
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
5463 621b7678 Iustin Pop
    if result.fail_msg:
5464 b2b8bcce Iustin Pop
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
5465 621b7678 Iustin Pop
                    file_storage_dir, tgt, result.fail_msg)
5466 e1bc0878 Iustin Pop
      all_result = False
5467 0f1a06e3 Manuel Franceschini
5468 e1bc0878 Iustin Pop
  return all_result
5469 a8083063 Iustin Pop
5470 a8083063 Iustin Pop
5471 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
5472 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
5473 e2fe6369 Iustin Pop

5474 e2fe6369 Iustin Pop
  """
5475 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk and swap space
5476 e2fe6369 Iustin Pop
  req_size_dict = {
5477 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
5478 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
5479 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
5480 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
5481 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
5482 e2fe6369 Iustin Pop
  }
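  # Example: two 1024 MB disks under DT_DRBD8 need 2 * (1024 + 128) =
  # 2304 MB in the volume group, while DT_FILE and DT_DISKLESS need none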
5483 e2fe6369 Iustin Pop
5484 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
5485 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
5486 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
5487 e2fe6369 Iustin Pop
5488 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
5489 e2fe6369 Iustin Pop
5490 e2fe6369 Iustin Pop
5491 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
5492 74409b12 Iustin Pop
  """Hypervisor parameter validation.
5493 74409b12 Iustin Pop

5494 74409b12 Iustin Pop
  This function abstracts the hypervisor parameter validation to be
5495 74409b12 Iustin Pop
  used in both instance create and instance modify.
5496 74409b12 Iustin Pop

5497 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
5498 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
5499 74409b12 Iustin Pop
  @type nodenames: list
5500 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
5501 74409b12 Iustin Pop
  @type hvname: string
5502 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
5503 74409b12 Iustin Pop
  @type hvparams: dict
5504 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
5505 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
5506 74409b12 Iustin Pop

5507 74409b12 Iustin Pop
  """
5508 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
5509 74409b12 Iustin Pop
                                                  hvname,
5510 74409b12 Iustin Pop
                                                  hvparams)
5511 74409b12 Iustin Pop
  for node in nodenames:
5512 781de953 Iustin Pop
    info = hvinfo[node]
5513 68c6f21c Iustin Pop
    if info.offline:
5514 68c6f21c Iustin Pop
      continue
5515 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
5516 74409b12 Iustin Pop
5517 74409b12 Iustin Pop
5518 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
5519 a8083063 Iustin Pop
  """Create an instance.
5520 a8083063 Iustin Pop

5521 a8083063 Iustin Pop
  """
5522 a8083063 Iustin Pop
  HPATH = "instance-add"
5523 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5524 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
5525 08db7c5c Iustin Pop
              "mode", "start",
5526 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
5527 338e51e8 Iustin Pop
              "hvparams", "beparams"]
5528 7baf741d Guido Trotter
  REQ_BGL = False
5529 7baf741d Guido Trotter
5530 7baf741d Guido Trotter
  def _ExpandNode(self, node):
5531 7baf741d Guido Trotter
    """Expands and checks one node name.
5532 7baf741d Guido Trotter

5533 7baf741d Guido Trotter
    """
5534 7baf741d Guido Trotter
    node_full = self.cfg.ExpandNodeName(node)
5535 7baf741d Guido Trotter
    if node_full is None:
5536 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Unknown node %s" % node, errors.ECODE_NOENT)
5537 7baf741d Guido Trotter
    return node_full
5538 7baf741d Guido Trotter
5539 7baf741d Guido Trotter
  def ExpandNames(self):
5540 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
5541 7baf741d Guido Trotter

5542 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
5543 7baf741d Guido Trotter

5544 7baf741d Guido Trotter
    """
5545 7baf741d Guido Trotter
    self.needed_locks = {}
5546 7baf741d Guido Trotter
5547 7baf741d Guido Trotter
    # set optional parameters to none if they don't exist
5548 6785674e Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
5549 7baf741d Guido Trotter
      if not hasattr(self.op, attr):
5550 7baf741d Guido Trotter
        setattr(self.op, attr, None)
5551 7baf741d Guido Trotter
5552 4b2f38dd Iustin Pop
    # cheap checks, mostly checking the validity of given constants
5553 4b2f38dd Iustin Pop
5554 7baf741d Guido Trotter
    # verify creation mode
5555 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
5556 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
5557 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
5558 5c983ee5 Iustin Pop
                                 self.op.mode, errors.ECODE_INVAL)
5559 4b2f38dd Iustin Pop
5560 7baf741d Guido Trotter
    # disk template and mirror node verification
5561 7baf741d Guido Trotter
    if self.op.disk_template not in constants.DISK_TEMPLATES:
5562 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name",
5563 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
5564 7baf741d Guido Trotter
5565 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
5566 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
5567 4b2f38dd Iustin Pop
5568 8705eb96 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
5569 8705eb96 Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
5570 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
5571 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
5572 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
5573 5c983ee5 Iustin Pop
                                  ",".join(enabled_hvs)),
5574 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
5575 4b2f38dd Iustin Pop
5576 6785674e Iustin Pop
    # check hypervisor parameter syntax (locally)
5577 a5728081 Guido Trotter
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
5578 abe609b2 Guido Trotter
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
5579 8705eb96 Iustin Pop
                                  self.op.hvparams)
5580 6785674e Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
5581 8705eb96 Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
5582 67fc3042 Iustin Pop
    self.hv_full = filled_hvp
5583 6785674e Iustin Pop
5584 338e51e8 Iustin Pop
    # fill and remember the beparams dict
5585 a5728081 Guido Trotter
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
5586 4ef7f423 Guido Trotter
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
5587 338e51e8 Iustin Pop
                                    self.op.beparams)
5588 338e51e8 Iustin Pop
5589 7baf741d Guido Trotter
    #### instance parameters check
5590 7baf741d Guido Trotter
5591 7baf741d Guido Trotter
    # instance name verification
5592 7baf741d Guido Trotter
    hostname1 = utils.HostInfo(self.op.instance_name)
5593 7baf741d Guido Trotter
    self.op.instance_name = instance_name = hostname1.name
5594 7baf741d Guido Trotter
5595 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
5596 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
5597 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
5598 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5599 5c983ee5 Iustin Pop
                                 instance_name, errors.ECODE_EXISTS)
5600 7baf741d Guido Trotter
5601 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
5602 7baf741d Guido Trotter
5603 08db7c5c Iustin Pop
    # NIC buildup
5604 08db7c5c Iustin Pop
    self.nics = []
5605 9dce4771 Guido Trotter
    for idx, nic in enumerate(self.op.nics):
5606 9dce4771 Guido Trotter
      nic_mode_req = nic.get("mode", None)
5607 9dce4771 Guido Trotter
      nic_mode = nic_mode_req
5608 9dce4771 Guido Trotter
      if nic_mode is None:
5609 9dce4771 Guido Trotter
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
5610 9dce4771 Guido Trotter
5611 9dce4771 Guido Trotter
      # in routed mode, for the first nic, the default ip is 'auto'
5612 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
5613 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_AUTO
5614 9dce4771 Guido Trotter
      else:
5615 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_NONE
5616 9dce4771 Guido Trotter
5617 08db7c5c Iustin Pop
      # ip validity checks
5618 9dce4771 Guido Trotter
      ip = nic.get("ip", default_ip_mode)
5619 9dce4771 Guido Trotter
      if ip is None or ip.lower() == constants.VALUE_NONE:
5620 08db7c5c Iustin Pop
        nic_ip = None
5621 08db7c5c Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
5622 08db7c5c Iustin Pop
        nic_ip = hostname1.ip
5623 08db7c5c Iustin Pop
      else:
5624 08db7c5c Iustin Pop
        if not utils.IsValidIP(ip):
5625 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
5626 5c983ee5 Iustin Pop
                                     " like a valid IP" % ip,
5627 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
5628 08db7c5c Iustin Pop
        nic_ip = ip
5629 08db7c5c Iustin Pop
5630 9dce4771 Guido Trotter
      # TODO: check the ip for uniqueness !!
5631 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
5632 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
5633 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
5634 9dce4771 Guido Trotter
5635 08db7c5c Iustin Pop
      # MAC address verification
5636 08db7c5c Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
5637 08db7c5c Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5638 08db7c5c Iustin Pop
        if not utils.IsValidMac(mac.lower()):
5639 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
5640 5c983ee5 Iustin Pop
                                     mac, errors.ECODE_INVAL)
5641 87e43988 Iustin Pop
        else:
5642 87e43988 Iustin Pop
          # or validate/reserve the current one
5643 87e43988 Iustin Pop
          if self.cfg.IsMacInUse(mac):
5644 87e43988 Iustin Pop
            raise errors.OpPrereqError("MAC address %s already in use"
5645 5c983ee5 Iustin Pop
                                       " in cluster" % mac,
5646 5c983ee5 Iustin Pop
                                       errors.ECODE_NOTUNIQUE)
5647 87e43988 Iustin Pop
5648 08db7c5c Iustin Pop
      # bridge verification
5649 9939547b Iustin Pop
      bridge = nic.get("bridge", None)
5650 9dce4771 Guido Trotter
      link = nic.get("link", None)
5651 9dce4771 Guido Trotter
      if bridge and link:
5652 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
5653 5c983ee5 Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
5654 9dce4771 Guido Trotter
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
5655 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
5656 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
5657 9dce4771 Guido Trotter
      elif bridge:
5658 9dce4771 Guido Trotter
        link = bridge
5659 9dce4771 Guido Trotter
5660 9dce4771 Guido Trotter
      nicparams = {}
5661 9dce4771 Guido Trotter
      if nic_mode_req:
5662 9dce4771 Guido Trotter
        nicparams[constants.NIC_MODE] = nic_mode_req
5663 9dce4771 Guido Trotter
      if link:
5664 9dce4771 Guido Trotter
        nicparams[constants.NIC_LINK] = link
5665 9dce4771 Guido Trotter
5666 9dce4771 Guido Trotter
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
5667 9dce4771 Guido Trotter
                                      nicparams)
5668 9dce4771 Guido Trotter
      objects.NIC.CheckParameterSyntax(check_params)
5669 9dce4771 Guido Trotter
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
5670 08db7c5c Iustin Pop
5671 08db7c5c Iustin Pop
    # disk checks/pre-build
5672 08db7c5c Iustin Pop
    self.disks = []
5673 08db7c5c Iustin Pop
    for disk in self.op.disks:
5674 08db7c5c Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
5675 08db7c5c Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
5676 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
5677 5c983ee5 Iustin Pop
                                   mode, errors.ECODE_INVAL)
5678 08db7c5c Iustin Pop
      size = disk.get("size", None)
5679 08db7c5c Iustin Pop
      if size is None:
5680 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
5681 08db7c5c Iustin Pop
      try:
5682 08db7c5c Iustin Pop
        size = int(size)
5683 08db7c5c Iustin Pop
      except ValueError:
5684 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
5685 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
5686 08db7c5c Iustin Pop
      self.disks.append({"size": size, "mode": mode})
5687 08db7c5c Iustin Pop
5688 7baf741d Guido Trotter
    # used in CheckPrereq for ip ping check
5689 7baf741d Guido Trotter
    self.check_ip = hostname1.ip
5690 7baf741d Guido Trotter
5691 7baf741d Guido Trotter
    # file storage checks
5692 7baf741d Guido Trotter
    if (self.op.file_driver and
5693 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
5694 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
5695 5c983ee5 Iustin Pop
                                 self.op.file_driver, errors.ECODE_INVAL)
5696 7baf741d Guido Trotter
5697 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
5698 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("File storage directory path not absolute",
5699 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
5700 7baf741d Guido Trotter
5701 7baf741d Guido Trotter
    ### Node/iallocator related checks
5702 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
5703 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
5704 5c983ee5 Iustin Pop
                                 " node must be given",
5705 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
5706 7baf741d Guido Trotter
5707 7baf741d Guido Trotter
    if self.op.iallocator:
5708 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5709 7baf741d Guido Trotter
    else:
5710 7baf741d Guido Trotter
      self.op.pnode = self._ExpandNode(self.op.pnode)
5711 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
5712 7baf741d Guido Trotter
      if self.op.snode is not None:
5713 7baf741d Guido Trotter
        self.op.snode = self._ExpandNode(self.op.snode)
5714 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
5715 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
5716 7baf741d Guido Trotter
5717 7baf741d Guido Trotter
    # in case of import lock the source node too
5718 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
5719 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
5720 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
5721 7baf741d Guido Trotter
5722 b9322a9f Guido Trotter
      if src_path is None:
5723 b9322a9f Guido Trotter
        self.op.src_path = src_path = self.op.instance_name
5724 b9322a9f Guido Trotter
5725 b9322a9f Guido Trotter
      if src_node is None:
5726 b9322a9f Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5727 b9322a9f Guido Trotter
        self.op.src_node = None
5728 b9322a9f Guido Trotter
        if os.path.isabs(src_path):
5729 b9322a9f Guido Trotter
          raise errors.OpPrereqError("Importing an instance from an absolute"
5730 5c983ee5 Iustin Pop
                                     " path requires a source node option.",
5731 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
5732 b9322a9f Guido Trotter
      else:
5733 b9322a9f Guido Trotter
        self.op.src_node = src_node = self._ExpandNode(src_node)
5734 b9322a9f Guido Trotter
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
5735 b9322a9f Guido Trotter
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
5736 b9322a9f Guido Trotter
        if not os.path.isabs(src_path):
5737 b9322a9f Guido Trotter
          self.op.src_path = src_path = \
5738 b9322a9f Guido Trotter
            os.path.join(constants.EXPORT_DIR, src_path)
5739 7baf741d Guido Trotter
5740 f2c05717 Guido Trotter
      # On import, force_variant must be True: if the variant had to be
5741 f2c05717 Guido Trotter
      # forced at the initial install, forcing it again is the only way
5742 f2c05717 Guido Trotter
      # the import can succeed
5743 f2c05717 Guido Trotter
      self.op.force_variant = True
5744 f2c05717 Guido Trotter
5745 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
5746 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
5747 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified",
5748 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
5749 f2c05717 Guido Trotter
      self.op.force_variant = getattr(self.op, "force_variant", False)
5750 a8083063 Iustin Pop
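# Illustrative sketch, not part of this LU: the ExpandNames checks above
# consume self.op.nics and self.op.disks as lists of plain dicts.  Only the
# dict keys used above ("mode", "ip", "mac", "link", "bridge", "size") are
# taken from the code; the constant strings below are local stand-ins, not
# the real ganeti.constants values.
_EXAMPLE_VALUE_AUTO = "auto"
_EXAMPLE_DISK_RDWR = "rw"

_example_op_nics = [
  {"mac": _EXAMPLE_VALUE_AUTO},                     # everything from defaults
  {"mode": "bridged", "link": "xen-br0", "mac": "aa:00:00:fa:3a:3f"},
  ]

def _example_parse_disks(disks):
  """Mirrors the size/mode handling of the disk loop in ExpandNames above."""
  parsed = []
  for disk in disks:
    mode = disk.get("mode", _EXAMPLE_DISK_RDWR)
    # int() raises ValueError on a bad size; the LU turns that into
    # an OpPrereqError
    size = int(disk.get("size"))
    parsed.append({"size": size, "mode": mode})
  return parsed

# disk sizes are given in mebibytes; string sizes are converted as above
assert _example_parse_disks([{"size": 10240, "mode": _EXAMPLE_DISK_RDWR},
                             {"size": "2048"}]) == \
  [{"size": 10240, "mode": "rw"}, {"size": 2048, "mode": "rw"}]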
5751 538475ca Iustin Pop
  def _RunAllocator(self):
5752 538475ca Iustin Pop
    """Run the allocator based on input opcode.
5753 538475ca Iustin Pop

5754 538475ca Iustin Pop
    """
5755 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
5756 923ddac0 Michael Hanselmann
    ial = IAllocator(self.cfg, self.rpc,
5757 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
5758 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
5759 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
5760 d1c2dd75 Iustin Pop
                     tags=[],
5761 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
5762 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
5763 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
5764 08db7c5c Iustin Pop
                     disks=self.disks,
5765 d1c2dd75 Iustin Pop
                     nics=nics,
5766 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
5767 29859cb7 Iustin Pop
                     )
5768 d1c2dd75 Iustin Pop
5769 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
5770 d1c2dd75 Iustin Pop
5771 d1c2dd75 Iustin Pop
    if not ial.success:
5772 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
5773 5c983ee5 Iustin Pop
                                 " iallocator '%s': %s" %
5774 5c983ee5 Iustin Pop
                                 (self.op.iallocator, ial.info),
5775 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
5776 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
5777 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5778 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
5779 97abc79f Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
5780 5c983ee5 Iustin Pop
                                  ial.required_nodes), errors.ECODE_FAULT)
5781 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
5782 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
5783 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
5784 86d9d3bb Iustin Pop
                 ", ".join(ial.nodes))
5785 27579978 Iustin Pop
    if ial.required_nodes == 2:
5786 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
5787 538475ca Iustin Pop
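# Standalone sketch (not part of this LU) of how _RunAllocator above consumes
# the allocator reply.  _FakeIAllocatorResult is a local stand-in for the real
# IAllocator object; only the attributes actually read above (success, info,
# nodes, required_nodes) are modelled.
class _FakeIAllocatorResult(object):
  def __init__(self, success, nodes, required_nodes, info=""):
    self.success = success
    self.nodes = nodes
    self.required_nodes = required_nodes
    self.info = info

def _example_pick_nodes(ial):
  """Mirrors the node selection done after ial.Run() above."""
  if not ial.success:
    raise ValueError("allocator failed: %s" % ial.info)
  if len(ial.nodes) != ial.required_nodes:
    raise ValueError("allocator returned %d node(s), %d required" %
                     (len(ial.nodes), ial.required_nodes))
  pnode = ial.nodes[0]
  if ial.required_nodes == 2:
    snode = ial.nodes[1]
  else:
    snode = None
  return pnode, snode

assert _example_pick_nodes(
  _FakeIAllocatorResult(True, ["node1", "node2"], 2)) == ("node1", "node2")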
5788 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5789 a8083063 Iustin Pop
    """Build hooks env.
5790 a8083063 Iustin Pop

5791 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
5792 a8083063 Iustin Pop

5793 a8083063 Iustin Pop
    """
5794 a8083063 Iustin Pop
    env = {
5795 2c2690c9 Iustin Pop
      "ADD_MODE": self.op.mode,
5796 a8083063 Iustin Pop
      }
5797 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
5798 2c2690c9 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
5799 2c2690c9 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
5800 2c2690c9 Iustin Pop
      env["SRC_IMAGES"] = self.src_images
5801 396e1b78 Michael Hanselmann
5802 2c2690c9 Iustin Pop
    env.update(_BuildInstanceHookEnv(
5803 2c2690c9 Iustin Pop
      name=self.op.instance_name,
5804 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
5805 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
5806 4978db17 Iustin Pop
      status=self.op.start,
5807 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
5808 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
5809 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
5810 f9b10246 Guido Trotter
      nics=_NICListToTuple(self, self.nics),
5811 2c2690c9 Iustin Pop
      disk_template=self.op.disk_template,
5812 2c2690c9 Iustin Pop
      disks=[(d["size"], d["mode"]) for d in self.disks],
5813 67fc3042 Iustin Pop
      bep=self.be_full,
5814 67fc3042 Iustin Pop
      hvp=self.hv_full,
5815 3df6e710 Iustin Pop
      hypervisor_name=self.op.hypervisor,
5816 396e1b78 Michael Hanselmann
    ))
5817 a8083063 Iustin Pop
5818 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
5819 a8083063 Iustin Pop
          self.secondaries)
5820 a8083063 Iustin Pop
    return env, nl, nl
5821 a8083063 Iustin Pop
5822 a8083063 Iustin Pop
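# Illustrative sketch only: the rough shape of the hooks environment built by
# BuildHooksEnv above for an import.  Only ADD_MODE and the SRC_* keys are
# taken literally from the code above; the hostname and paths are made-up
# examples, and the per-instance keys added by _BuildInstanceHookEnv are not
# spelled out here.
_example_hooks_env = {
  "ADD_MODE": constants.INSTANCE_IMPORT,
  "SRC_NODE": "node3.example.com",
  "SRC_PATH": "/srv/ganeti/export/inst1.example.com",
  "SRC_IMAGES": ["/srv/ganeti/export/inst1.example.com/disk0_dump"],
  # ... plus the per-instance keys filled in by _BuildInstanceHookEnv()
  }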
5823 a8083063 Iustin Pop
  def CheckPrereq(self):
5824 a8083063 Iustin Pop
    """Check prerequisites.
5825 a8083063 Iustin Pop

5826 a8083063 Iustin Pop
    """
5827 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
5828 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
5829 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
5830 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_STATE)
5831 eedc99de Manuel Franceschini
5832 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
5833 7baf741d Guido Trotter
      src_node = self.op.src_node
5834 7baf741d Guido Trotter
      src_path = self.op.src_path
5835 a8083063 Iustin Pop
5836 c0cbdc67 Guido Trotter
      if src_node is None:
5837 1b7bfbb7 Iustin Pop
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
5838 1b7bfbb7 Iustin Pop
        exp_list = self.rpc.call_export_list(locked_nodes)
5839 c0cbdc67 Guido Trotter
        found = False
5840 c0cbdc67 Guido Trotter
        for node in exp_list:
5841 4c4e4e1e Iustin Pop
          if exp_list[node].fail_msg:
5842 1b7bfbb7 Iustin Pop
            continue
5843 1b7bfbb7 Iustin Pop
          if src_path in exp_list[node].payload:
5844 c0cbdc67 Guido Trotter
            found = True
5845 c0cbdc67 Guido Trotter
            self.op.src_node = src_node = node
5846 c0cbdc67 Guido Trotter
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
5847 c0cbdc67 Guido Trotter
                                                       src_path)
5848 c0cbdc67 Guido Trotter
            break
5849 c0cbdc67 Guido Trotter
        if not found:
5850 c0cbdc67 Guido Trotter
          raise errors.OpPrereqError("No export found for relative path %s" %
5851 5c983ee5 Iustin Pop
                                      src_path, errors.ECODE_INVAL)
5852 c0cbdc67 Guido Trotter
5853 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, src_node)
5854 781de953 Iustin Pop
      result = self.rpc.call_export_info(src_node, src_path)
5855 4c4e4e1e Iustin Pop
      result.Raise("No export or invalid export found in dir %s" % src_path)
5856 a8083063 Iustin Pop
5857 3eccac06 Iustin Pop
      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
5858 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
5859 5c983ee5 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config",
5860 5c983ee5 Iustin Pop
                                     errors.ECODE_ENVIRON)
5861 a8083063 Iustin Pop
5862 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
5863 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
5864 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
5865 5c983ee5 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION),
5866 5c983ee5 Iustin Pop
                                   errors.ECODE_ENVIRON)
5867 a8083063 Iustin Pop
5868 09acf207 Guido Trotter
      # Check that the new instance doesn't have less disks than the export
5869 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
5870 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
5871 09acf207 Guido Trotter
      if instance_disks < export_disks:
5872 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
5873 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
5874 5c983ee5 Iustin Pop
                                   (instance_disks, export_disks),
5875 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
5876 a8083063 Iustin Pop
5877 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
5878 09acf207 Guido Trotter
      disk_images = []
5879 09acf207 Guido Trotter
      for idx in range(export_disks):
5880 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
5881 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
5882 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
5883 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
5884 09acf207 Guido Trotter
          image = os.path.join(src_path, export_name)
5885 09acf207 Guido Trotter
          disk_images.append(image)
5886 09acf207 Guido Trotter
        else:
5887 09acf207 Guido Trotter
          disk_images.append(False)
5888 09acf207 Guido Trotter
5889 09acf207 Guido Trotter
      self.src_images = disk_images
5890 901a65c1 Iustin Pop
5891 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
5892 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
5893 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
5894 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
5895 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
5896 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
5897 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
5898 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
5899 bc89efc3 Guido Trotter
5900 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
5901 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
5902 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
5903 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
5904 5c983ee5 Iustin Pop
                                 " adding an instance in start mode",
5905 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
5906 901a65c1 Iustin Pop
5907 901a65c1 Iustin Pop
    if self.op.ip_check:
5908 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
5909 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
5910 5c983ee5 Iustin Pop
                                   (self.check_ip, self.op.instance_name),
5911 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
5912 901a65c1 Iustin Pop
5913 295728df Guido Trotter
    #### mac address generation
5914 295728df Guido Trotter
    # Generating the mac address here lets both the allocator and the hooks get
5915 295728df Guido Trotter
    # the real final mac address rather than the 'auto' or 'generate' value.
5916 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
5917 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
5918 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
5919 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
5920 295728df Guido Trotter
    # creation job will fail.
5921 295728df Guido Trotter
    for nic in self.nics:
5922 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5923 295728df Guido Trotter
        nic.mac = self.cfg.GenerateMAC()
5924 295728df Guido Trotter
5925 538475ca Iustin Pop
    #### allocator run
5926 538475ca Iustin Pop
5927 538475ca Iustin Pop
    if self.op.iallocator is not None:
5928 538475ca Iustin Pop
      self._RunAllocator()
5929 0f1a06e3 Manuel Franceschini
5930 901a65c1 Iustin Pop
    #### node related checks
5931 901a65c1 Iustin Pop
5932 901a65c1 Iustin Pop
    # check primary node
5933 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
5934 7baf741d Guido Trotter
    assert self.pnode is not None, \
5935 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
5936 7527a8a4 Iustin Pop
    if pnode.offline:
5937 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
5938 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
5939 733a2b6a Iustin Pop
    if pnode.drained:
5940 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
5941 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
5942 7527a8a4 Iustin Pop
5943 901a65c1 Iustin Pop
    self.secondaries = []
5944 901a65c1 Iustin Pop
5945 901a65c1 Iustin Pop
    # mirror node verification
5946 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
5947 7baf741d Guido Trotter
      if self.op.snode is None:
5948 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
5949 5c983ee5 Iustin Pop
                                   " a mirror node", errors.ECODE_INVAL)
5950 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
5951 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be the"
5952 5c983ee5 Iustin Pop
                                   " primary node.", errors.ECODE_INVAL)
5953 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
5954 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
5955 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
5956 a8083063 Iustin Pop
5957 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
5958 6785674e Iustin Pop
5959 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
5960 08db7c5c Iustin Pop
                                self.disks)
5961 ed1ebc60 Guido Trotter
5962 8d75db10 Iustin Pop
    # Check lv size requirements
5963 8d75db10 Iustin Pop
    if req_size is not None:
5964 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
5965 72737a7f Iustin Pop
                                         self.op.hypervisor)
5966 8d75db10 Iustin Pop
      for node in nodenames:
5967 781de953 Iustin Pop
        info = nodeinfo[node]
5968 4c4e4e1e Iustin Pop
        info.Raise("Cannot get current information from node %s" % node)
5969 070e998b Iustin Pop
        info = info.payload
5970 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
5971 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
5972 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
5973 5c983ee5 Iustin Pop
                                     " node %s" % node, errors.ECODE_ENVIRON)
5974 070e998b Iustin Pop
        if req_size > vg_free:
5975 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
5976 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
5977 5c983ee5 Iustin Pop
                                     (node, vg_free, req_size),
5978 5c983ee5 Iustin Pop
                                     errors.ECODE_NORES)
5979 ed1ebc60 Guido Trotter
5980 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
5981 6785674e Iustin Pop
5982 a8083063 Iustin Pop
    # os verification
5983 781de953 Iustin Pop
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
5984 4c4e4e1e Iustin Pop
    result.Raise("OS '%s' not in supported os list for primary node %s" %
5985 045dd6d9 Iustin Pop
                 (self.op.os_type, pnode.name),
5986 045dd6d9 Iustin Pop
                 prereq=True, ecode=errors.ECODE_INVAL)
5987 f2c05717 Guido Trotter
    if not self.op.force_variant:
5988 f2c05717 Guido Trotter
      _CheckOSVariant(result.payload, self.op.os_type)
5989 a8083063 Iustin Pop
5990 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
5991 a8083063 Iustin Pop
5992 49ce1563 Iustin Pop
    # memory check on primary node
5993 49ce1563 Iustin Pop
    if self.op.start:
5994 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
5995 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
5996 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
5997 338e51e8 Iustin Pop
                           self.op.hypervisor)
5998 49ce1563 Iustin Pop
5999 08896026 Iustin Pop
    self.dry_run_result = list(nodenames)
6000 08896026 Iustin Pop
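# Standalone sketch (not part of this LU) of the free-space check done in
# CheckPrereq above: req_size (as computed by _ComputeDiskSize) is compared
# against each candidate node's 'vg_free' value, both in MiB.  All names
# below are local to the example.
def _example_nodes_without_space(req_size, vg_free_by_node):
  """Returns the nodes whose volume group cannot hold req_size MiB."""
  too_small = []
  for node, vg_free in sorted(vg_free_by_node.items()):
    if not isinstance(vg_free, int):
      # mirrors the "Can't compute free disk space" error above
      raise ValueError("unknown free space on node %s" % node)
    if req_size > vg_free:
      too_small.append(node)
  return too_small

assert _example_nodes_without_space(20480, {"node1": 102400,
                                            "node2": 8192}) == ["node2"]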
6001 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6002 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
6003 a8083063 Iustin Pop

6004 a8083063 Iustin Pop
    """
6005 a8083063 Iustin Pop
    instance = self.op.instance_name
6006 a8083063 Iustin Pop
    pnode_name = self.pnode.name
6007 a8083063 Iustin Pop
6008 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
6009 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
6010 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
6011 2a6469d5 Alexander Schreiber
    else:
6012 2a6469d5 Alexander Schreiber
      network_port = None
6013 58acb49d Alexander Schreiber
6014 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
6015 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
6016 31a853d2 Iustin Pop
6017 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
6018 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
6019 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
6020 2c313123 Manuel Franceschini
    else:
6021 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
6022 2c313123 Manuel Franceschini
6023 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
6024 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
6025 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
6026 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
6027 0f1a06e3 Manuel Franceschini
6028 0f1a06e3 Manuel Franceschini
6029 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
6030 a8083063 Iustin Pop
                                  self.op.disk_template,
6031 a8083063 Iustin Pop
                                  instance, pnode_name,
6032 08db7c5c Iustin Pop
                                  self.secondaries,
6033 08db7c5c Iustin Pop
                                  self.disks,
6034 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
6035 e2a65344 Iustin Pop
                                  self.op.file_driver,
6036 e2a65344 Iustin Pop
                                  0)
6037 a8083063 Iustin Pop
6038 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
6039 a8083063 Iustin Pop
                            primary_node=pnode_name,
6040 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
6041 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
6042 4978db17 Iustin Pop
                            admin_up=False,
6043 58acb49d Alexander Schreiber
                            network_port=network_port,
6044 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
6045 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
6046 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
6047 a8083063 Iustin Pop
                            )
6048 a8083063 Iustin Pop
6049 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
6050 796cab27 Iustin Pop
    try:
6051 796cab27 Iustin Pop
      _CreateDisks(self, iobj)
6052 796cab27 Iustin Pop
    except errors.OpExecError:
6053 796cab27 Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
6054 796cab27 Iustin Pop
      try:
6055 796cab27 Iustin Pop
        _RemoveDisks(self, iobj)
6056 796cab27 Iustin Pop
      finally:
6057 796cab27 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance)
6058 796cab27 Iustin Pop
        raise
6059 a8083063 Iustin Pop
6060 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
6061 a8083063 Iustin Pop
6062 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
6063 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
6064 7baf741d Guido Trotter
    # added the instance to the config
6065 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
6066 e36e96b4 Guido Trotter
    # Unlock all the nodes
6067 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
6068 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
6069 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
6070 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
6071 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
6072 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
6073 9c8971d7 Guido Trotter
    else:
6074 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
6075 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
6076 a8083063 Iustin Pop
6077 a8083063 Iustin Pop
    if self.op.wait_for_sync:
6078 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
6079 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
6080 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
6081 a8083063 Iustin Pop
      time.sleep(15)
6082 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
6083 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
6084 a8083063 Iustin Pop
    else:
6085 a8083063 Iustin Pop
      disk_abort = False
6086 a8083063 Iustin Pop
6087 a8083063 Iustin Pop
    if disk_abort:
6088 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
6089 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
6090 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
6091 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
6092 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
6093 3ecf6786 Iustin Pop
                               " this instance")
6094 a8083063 Iustin Pop
6095 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
6096 a8083063 Iustin Pop
                (instance, pnode_name))
6097 a8083063 Iustin Pop
6098 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
6099 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
6100 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
6101 e557bae9 Guido Trotter
        result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
6102 4c4e4e1e Iustin Pop
        result.Raise("Could not add os for instance %s"
6103 4c4e4e1e Iustin Pop
                     " on node %s" % (instance, pnode_name))
6104 a8083063 Iustin Pop
6105 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
6106 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
6107 a8083063 Iustin Pop
        src_node = self.op.src_node
6108 09acf207 Guido Trotter
        src_images = self.src_images
6109 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
6110 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
6111 09acf207 Guido Trotter
                                                         src_node, src_images,
6112 6c0af70e Guido Trotter
                                                         cluster_name)
6113 4c4e4e1e Iustin Pop
        msg = import_result.fail_msg
6114 944bf548 Iustin Pop
        if msg:
6115 944bf548 Iustin Pop
          self.LogWarning("Error while importing the disk images for instance"
6116 944bf548 Iustin Pop
                          " %s on node %s: %s" % (instance, pnode_name, msg))
6117 a8083063 Iustin Pop
      else:
6118 a8083063 Iustin Pop
        # also checked in the prereq part
6119 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
6120 3ecf6786 Iustin Pop
                                     % self.op.mode)
6121 a8083063 Iustin Pop
6122 a8083063 Iustin Pop
    if self.op.start:
6123 4978db17 Iustin Pop
      iobj.admin_up = True
6124 a4eae71f Michael Hanselmann
      self.cfg.Update(iobj, feedback_fn)
6125 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
6126 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
6127 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
6128 4c4e4e1e Iustin Pop
      result.Raise("Could not start instance")
6129 a8083063 Iustin Pop
6130 08896026 Iustin Pop
    return list(iobj.all_nodes)
6131 08896026 Iustin Pop
6132 a8083063 Iustin Pop
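# Illustrative sketch of the file-storage path handling in
# LUCreateInstance.Exec above.  The base directory is an example value, not
# necessarily what cfg.GetFileStorageDir() returns on a real cluster; os.path
# is already imported at the top of this module.
def _example_file_storage_dir(base, op_file_storage_dir, instance_name):
  """Mirrors the None -> "" substitution and path join done in Exec()."""
  if op_file_storage_dir is None:
    sub = ""                      # os.path.join() does not accept None
  else:
    sub = op_file_storage_dir
  return os.path.normpath(os.path.join(base, sub, instance_name))

assert _example_file_storage_dir("/srv/ganeti/file-storage", None,
                                 "inst1.example.com") == \
  "/srv/ganeti/file-storage/inst1.example.com"
assert _example_file_storage_dir("/srv/ganeti/file-storage", "web",
                                 "inst1.example.com") == \
  "/srv/ganeti/file-storage/web/inst1.example.com"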
6133 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
6134 a8083063 Iustin Pop
  """Connect to an instance's console.
6135 a8083063 Iustin Pop

6136 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
6137 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
6138 a8083063 Iustin Pop
  console.
6139 a8083063 Iustin Pop

6140 a8083063 Iustin Pop
  """
6141 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
6142 8659b73e Guido Trotter
  REQ_BGL = False
6143 8659b73e Guido Trotter
6144 8659b73e Guido Trotter
  def ExpandNames(self):
6145 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
6146 a8083063 Iustin Pop
6147 a8083063 Iustin Pop
  def CheckPrereq(self):
6148 a8083063 Iustin Pop
    """Check prerequisites.
6149 a8083063 Iustin Pop

6150 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
6151 a8083063 Iustin Pop

6152 a8083063 Iustin Pop
    """
6153 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6154 8659b73e Guido Trotter
    assert self.instance is not None, \
6155 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6156 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
6157 a8083063 Iustin Pop
6158 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6159 a8083063 Iustin Pop
    """Connect to the console of an instance
6160 a8083063 Iustin Pop

6161 a8083063 Iustin Pop
    """
6162 a8083063 Iustin Pop
    instance = self.instance
6163 a8083063 Iustin Pop
    node = instance.primary_node
6164 a8083063 Iustin Pop
6165 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
6166 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
6167 4c4e4e1e Iustin Pop
    node_insts.Raise("Can't get node information from %s" % node)
6168 a8083063 Iustin Pop
6169 aca13712 Iustin Pop
    if instance.name not in node_insts.payload:
6170 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
6171 a8083063 Iustin Pop
6172 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
6173 a8083063 Iustin Pop
6174 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
6175 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
6176 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
6177 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
6178 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
6179 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
6180 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
6181 b047857b Michael Hanselmann
6182 82122173 Iustin Pop
    # build ssh cmdline
6183 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
6184 a8083063 Iustin Pop
6185 a8083063 Iustin Pop
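# Minimal sketch (a local helper, not objects.FillDict itself) of the
# "cluster defaults overridden by per-instance values" pattern behind
# FillHV/FillBE in LUConnectConsole.Exec above and objects.FillDict elsewhere
# in this file.  The parameter names and values are examples only.
def _example_fill_dict(defaults, custom):
  """Returns a copy of 'defaults' updated with the 'custom' overrides."""
  filled = defaults.copy()
  filled.update(custom)
  return filled

_example_cluster_beparams = {"memory": 128, "vcpus": 1, "auto_balance": True}
_example_instance_beparams = {"memory": 512}

assert _example_fill_dict(_example_cluster_beparams,
                          _example_instance_beparams) == \
  {"memory": 512, "vcpus": 1, "auto_balance": True}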
6186 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
6187 a8083063 Iustin Pop
  """Replace the disks of an instance.
6188 a8083063 Iustin Pop

6189 a8083063 Iustin Pop
  """
6190 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
6191 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6192 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
6193 efd990e4 Guido Trotter
  REQ_BGL = False
6194 efd990e4 Guido Trotter
6195 7e9366f7 Iustin Pop
  def CheckArguments(self):
6196 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
6197 efd990e4 Guido Trotter
      self.op.remote_node = None
6198 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
6199 7e9366f7 Iustin Pop
      self.op.iallocator = None
6200 7e9366f7 Iustin Pop
6201 c68174b6 Michael Hanselmann
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
6202 c68174b6 Michael Hanselmann
                                  self.op.iallocator)
6203 7e9366f7 Iustin Pop
6204 7e9366f7 Iustin Pop
  def ExpandNames(self):
6205 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
6206 7e9366f7 Iustin Pop
6207 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
6208 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6209 2bb5c911 Michael Hanselmann
6210 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
6211 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
6212 efd990e4 Guido Trotter
      if remote_node is None:
6213 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
6214 5c983ee5 Iustin Pop
                                   self.op.remote_node, errors.ECODE_NOENT)
6215 2bb5c911 Michael Hanselmann
6216 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
6217 2bb5c911 Michael Hanselmann
6218 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
6219 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
6220 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
6221 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
6222 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
6223 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6224 2bb5c911 Michael Hanselmann
6225 efd990e4 Guido Trotter
    else:
6226 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
6227 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6228 efd990e4 Guido Trotter
6229 c68174b6 Michael Hanselmann
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
6230 c68174b6 Michael Hanselmann
                                   self.op.iallocator, self.op.remote_node,
6231 c68174b6 Michael Hanselmann
                                   self.op.disks)
6232 c68174b6 Michael Hanselmann
6233 3a012b41 Michael Hanselmann
    self.tasklets = [self.replacer]
6234 2bb5c911 Michael Hanselmann
6235 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
6236 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
6237 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
6238 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
6239 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6240 efd990e4 Guido Trotter
      self._LockInstancesNodes()
6241 a8083063 Iustin Pop
6242 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6243 a8083063 Iustin Pop
    """Build hooks env.
6244 a8083063 Iustin Pop

6245 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
6246 a8083063 Iustin Pop

6247 a8083063 Iustin Pop
    """
6248 2bb5c911 Michael Hanselmann
    instance = self.replacer.instance
6249 a8083063 Iustin Pop
    env = {
6250 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
6251 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
6252 2bb5c911 Michael Hanselmann
      "OLD_SECONDARY": instance.secondary_nodes[0],
6253 a8083063 Iustin Pop
      }
6254 2bb5c911 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self, instance))
6255 0834c866 Iustin Pop
    nl = [
6256 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
6257 2bb5c911 Michael Hanselmann
      instance.primary_node,
6258 0834c866 Iustin Pop
      ]
6259 0834c866 Iustin Pop
    if self.op.remote_node is not None:
6260 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
6261 a8083063 Iustin Pop
    return env, nl, nl
6262 a8083063 Iustin Pop
6263 2bb5c911 Michael Hanselmann
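# Standalone sketch of the node-locking decision made in
# LUReplaceDisks.ExpandNames above.  _EXAMPLE_ALL_SET is a local marker, not
# locking.ALL_SET, and the returned pair is a simplification of
# self.needed_locks/self.recalculate_locks.
_EXAMPLE_ALL_SET = object()

def _example_replace_disks_node_locks(iallocator, remote_node):
  """Returns (node_locks, recalc_mode) as decided in ExpandNames above."""
  if iallocator is not None:
    # new secondary picked by the allocator: any node may end up locked
    return _EXAMPLE_ALL_SET, None
  elif remote_node is not None:
    # explicit new secondary: lock it now, append the instance's nodes later
    return [remote_node], "append"
  else:
    # keeping the current nodes: they are only known once the instance is
    # locked, so the node locks are recalculated in DeclareLocks
    return [], "replace"

assert _example_replace_disks_node_locks(None, "node4.example.com") == \
  (["node4.example.com"], "append")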
6264 7ffc5a86 Michael Hanselmann
class LUEvacuateNode(LogicalUnit):
6265 7ffc5a86 Michael Hanselmann
  """Relocate the secondary instances from a node.
6266 7ffc5a86 Michael Hanselmann

6267 7ffc5a86 Michael Hanselmann
  """
6268 7ffc5a86 Michael Hanselmann
  HPATH = "node-evacuate"
6269 7ffc5a86 Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
6270 7ffc5a86 Michael Hanselmann
  _OP_REQP = ["node_name"]
6271 7ffc5a86 Michael Hanselmann
  REQ_BGL = False
6272 7ffc5a86 Michael Hanselmann
6273 7ffc5a86 Michael Hanselmann
  def CheckArguments(self):
6274 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "remote_node"):
6275 7ffc5a86 Michael Hanselmann
      self.op.remote_node = None
6276 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "iallocator"):
6277 7ffc5a86 Michael Hanselmann
      self.op.iallocator = None
6278 7ffc5a86 Michael Hanselmann
6279 7ffc5a86 Michael Hanselmann
    TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
6280 7ffc5a86 Michael Hanselmann
                                  self.op.remote_node,
6281 7ffc5a86 Michael Hanselmann
                                  self.op.iallocator)
6282 7ffc5a86 Michael Hanselmann
6283 7ffc5a86 Michael Hanselmann
  def ExpandNames(self):
6284 7ffc5a86 Michael Hanselmann
    self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name)
6285 7ffc5a86 Michael Hanselmann
    if self.op.node_name is None:
6286 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name,
6287 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
6288 7ffc5a86 Michael Hanselmann
6289 7ffc5a86 Michael Hanselmann
    self.needed_locks = {}
6290 7ffc5a86 Michael Hanselmann
6291 7ffc5a86 Michael Hanselmann
    # Declare node locks
6292 7ffc5a86 Michael Hanselmann
    if self.op.iallocator is not None:
6293 7ffc5a86 Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6294 7ffc5a86 Michael Hanselmann
6295 7ffc5a86 Michael Hanselmann
    elif self.op.remote_node is not None:
6296 7ffc5a86 Michael Hanselmann
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
6297 7ffc5a86 Michael Hanselmann
      if remote_node is None:
6298 7ffc5a86 Michael Hanselmann
        raise errors.OpPrereqError("Node '%s' not known" %
6299 5c983ee5 Iustin Pop
                                   self.op.remote_node, errors.ECODE_NOENT)
6300 7ffc5a86 Michael Hanselmann
6301 7ffc5a86 Michael Hanselmann
      self.op.remote_node = remote_node
6302 7ffc5a86 Michael Hanselmann
6303 7ffc5a86 Michael Hanselmann
      # Warning: do not remove the locking of the new secondary here
6304 7ffc5a86 Michael Hanselmann
      # unless DRBD8.AddChildren is changed to work in parallel;
6305 7ffc5a86 Michael Hanselmann
      # currently it doesn't since parallel invocations of
6306 7ffc5a86 Michael Hanselmann
      # FindUnusedMinor will conflict
6307 7ffc5a86 Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
6308 7ffc5a86 Michael Hanselmann
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6309 7ffc5a86 Michael Hanselmann
6310 7ffc5a86 Michael Hanselmann
    else:
6311 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid parameters", errors.ECODE_INVAL)
6312 7ffc5a86 Michael Hanselmann
6313 7ffc5a86 Michael Hanselmann
    # Create tasklets for replacing disks for all secondary instances on this
6314 7ffc5a86 Michael Hanselmann
    # node
6315 7ffc5a86 Michael Hanselmann
    names = []
6316 3a012b41 Michael Hanselmann
    tasklets = []
6317 7ffc5a86 Michael Hanselmann
6318 7ffc5a86 Michael Hanselmann
    for inst in _GetNodeSecondaryInstances(self.cfg, self.op.node_name):
6319 7ffc5a86 Michael Hanselmann
      logging.debug("Replacing disks for instance %s", inst.name)
6320 7ffc5a86 Michael Hanselmann
      names.append(inst.name)
6321 7ffc5a86 Michael Hanselmann
6322 7ffc5a86 Michael Hanselmann
      replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
6323 7ffc5a86 Michael Hanselmann
                                self.op.iallocator, self.op.remote_node, [])
6324 3a012b41 Michael Hanselmann
      tasklets.append(replacer)
6325 7ffc5a86 Michael Hanselmann
6326 3a012b41 Michael Hanselmann
    self.tasklets = tasklets
6327 7ffc5a86 Michael Hanselmann
    self.instance_names = names
6328 7ffc5a86 Michael Hanselmann
6329 7ffc5a86 Michael Hanselmann
    # Declare instance locks
6330 7ffc5a86 Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names
6331 7ffc5a86 Michael Hanselmann
6332 7ffc5a86 Michael Hanselmann
  def DeclareLocks(self, level):
6333 7ffc5a86 Michael Hanselmann
    # If we're not already locking all nodes in the set we have to declare the
6334 7ffc5a86 Michael Hanselmann
    # instance's primary/secondary nodes.
6335 7ffc5a86 Michael Hanselmann
    if (level == locking.LEVEL_NODE and
6336 7ffc5a86 Michael Hanselmann
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6337 7ffc5a86 Michael Hanselmann
      self._LockInstancesNodes()
6338 7ffc5a86 Michael Hanselmann
6339 7ffc5a86 Michael Hanselmann
  def BuildHooksEnv(self):
6340 7ffc5a86 Michael Hanselmann
    """Build hooks env.
6341 7ffc5a86 Michael Hanselmann

6342 7ffc5a86 Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
6343 7ffc5a86 Michael Hanselmann

6344 7ffc5a86 Michael Hanselmann
    """
6345 7ffc5a86 Michael Hanselmann
    env = {
6346 7ffc5a86 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
6347 7ffc5a86 Michael Hanselmann
      }
6348 7ffc5a86 Michael Hanselmann
6349 7ffc5a86 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
6350 7ffc5a86 Michael Hanselmann
6351 7ffc5a86 Michael Hanselmann
    if self.op.remote_node is not None:
6352 7ffc5a86 Michael Hanselmann
      env["NEW_SECONDARY"] = self.op.remote_node
6353 7ffc5a86 Michael Hanselmann
      nl.append(self.op.remote_node)
6354 7ffc5a86 Michael Hanselmann
6355 7ffc5a86 Michael Hanselmann
    return (env, nl, nl)
6356 7ffc5a86 Michael Hanselmann
6357 7ffc5a86 Michael Hanselmann
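# Standalone sketch of the parameter combinations enforced by
# TLReplaceDisks.CheckArguments (called from LUReplaceDisks and LUEvacuateNode
# above, defined just below).  "chg" is a local stand-in for
# constants.REPLACE_DISK_CHG; any other mode keeps the current secondary.
def _example_replace_disks_args_ok(mode, remote_node, iallocator):
  """True iff CheckArguments would accept this combination."""
  if mode == "chg":
    # changing the secondary: exactly one way of picking the new node
    return (remote_node is None) != (iallocator is None)
  # not changing the secondary: neither option may be given
  return remote_node is None and iallocator is None

assert _example_replace_disks_args_ok("chg", "node2.example.com", None)
assert not _example_replace_disks_args_ok("chg", "node2.example.com", "hail")
assert not _example_replace_disks_args_ok("pri", "node2.example.com", None)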
6358 c68174b6 Michael Hanselmann
class TLReplaceDisks(Tasklet):
6359 2bb5c911 Michael Hanselmann
  """Replaces disks for an instance.
6360 2bb5c911 Michael Hanselmann

6361 2bb5c911 Michael Hanselmann
  Note: Locking is not within the scope of this class.
6362 2bb5c911 Michael Hanselmann

6363 2bb5c911 Michael Hanselmann
  """
6364 2bb5c911 Michael Hanselmann
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
6365 2bb5c911 Michael Hanselmann
               disks):
6366 2bb5c911 Michael Hanselmann
    """Initializes this class.
6367 2bb5c911 Michael Hanselmann

6368 2bb5c911 Michael Hanselmann
    """
6369 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
6370 464243a7 Michael Hanselmann
6371 2bb5c911 Michael Hanselmann
    # Parameters
6372 2bb5c911 Michael Hanselmann
    self.instance_name = instance_name
6373 2bb5c911 Michael Hanselmann
    self.mode = mode
6374 2bb5c911 Michael Hanselmann
    self.iallocator_name = iallocator_name
6375 2bb5c911 Michael Hanselmann
    self.remote_node = remote_node
6376 2bb5c911 Michael Hanselmann
    self.disks = disks
6377 2bb5c911 Michael Hanselmann
6378 2bb5c911 Michael Hanselmann
    # Runtime data
6379 2bb5c911 Michael Hanselmann
    self.instance = None
6380 2bb5c911 Michael Hanselmann
    self.new_node = None
6381 2bb5c911 Michael Hanselmann
    self.target_node = None
6382 2bb5c911 Michael Hanselmann
    self.other_node = None
6383 2bb5c911 Michael Hanselmann
    self.remote_node_info = None
6384 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = None
6385 2bb5c911 Michael Hanselmann
6386 2bb5c911 Michael Hanselmann
  @staticmethod
6387 2bb5c911 Michael Hanselmann
  def CheckArguments(mode, remote_node, iallocator):
6388 c68174b6 Michael Hanselmann
    """Helper function for users of this class.
6389 c68174b6 Michael Hanselmann

6390 c68174b6 Michael Hanselmann
    """
6391 2bb5c911 Michael Hanselmann
    # check for valid parameter combination
6392 2bb5c911 Michael Hanselmann
    if mode == constants.REPLACE_DISK_CHG:
6393 02a00186 Michael Hanselmann
      if remote_node is None and iallocator is None:
6394 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("When changing the secondary either an"
6395 2bb5c911 Michael Hanselmann
                                   " iallocator script must be used or the"
6396 5c983ee5 Iustin Pop
                                   " new node given", errors.ECODE_INVAL)
6397 02a00186 Michael Hanselmann
6398 02a00186 Michael Hanselmann
      if remote_node is not None and iallocator is not None:
6399 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("Give either the iallocator or the new"
6400 5c983ee5 Iustin Pop
                                   " secondary, not both", errors.ECODE_INVAL)
6401 02a00186 Michael Hanselmann
6402 02a00186 Michael Hanselmann
    elif remote_node is not None or iallocator is not None:
6403 02a00186 Michael Hanselmann
      # Not replacing the secondary
6404 02a00186 Michael Hanselmann
      raise errors.OpPrereqError("The iallocator and new node options can"
6405 02a00186 Michael Hanselmann
                                 " only be used when changing the"
6406 5c983ee5 Iustin Pop
                                 " secondary node", errors.ECODE_INVAL)
6407 2bb5c911 Michael Hanselmann
6408 2bb5c911 Michael Hanselmann
  @staticmethod
6409 2bb5c911 Michael Hanselmann
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
6410 2bb5c911 Michael Hanselmann
    """Compute a new secondary node using an IAllocator.
6411 2bb5c911 Michael Hanselmann

6412 2bb5c911 Michael Hanselmann
    """
6413 2bb5c911 Michael Hanselmann
    ial = IAllocator(lu.cfg, lu.rpc,
6414 2bb5c911 Michael Hanselmann
                     mode=constants.IALLOCATOR_MODE_RELOC,
6415 2bb5c911 Michael Hanselmann
                     name=instance_name,
6416 2bb5c911 Michael Hanselmann
                     relocate_from=relocate_from)
6417 2bb5c911 Michael Hanselmann
6418 2bb5c911 Michael Hanselmann
    ial.Run(iallocator_name)
6419 2bb5c911 Michael Hanselmann
6420 2bb5c911 Michael Hanselmann
    if not ial.success:
6421 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
6422 5c983ee5 Iustin Pop
                                 " %s" % (iallocator_name, ial.info),
6423 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
6424 2bb5c911 Michael Hanselmann
6425 2bb5c911 Michael Hanselmann
    if len(ial.nodes) != ial.required_nodes:
6426 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6427 2bb5c911 Michael Hanselmann
                                 " of nodes (%s), required %s" %
6428 5c983ee5 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes),
6429 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
6430 2bb5c911 Michael Hanselmann
6431 2bb5c911 Michael Hanselmann
    remote_node_name = ial.nodes[0]
6432 2bb5c911 Michael Hanselmann
6433 2bb5c911 Michael Hanselmann
    lu.LogInfo("Selected new secondary for instance '%s': %s",
6434 2bb5c911 Michael Hanselmann
               instance_name, remote_node_name)
6435 2bb5c911 Michael Hanselmann
6436 2bb5c911 Michael Hanselmann
    return remote_node_name
6437 2bb5c911 Michael Hanselmann
6438 942be002 Michael Hanselmann
  def _FindFaultyDisks(self, node_name):
6439 2d9005d8 Michael Hanselmann
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
6440 2d9005d8 Michael Hanselmann
                                    node_name, True)
6441 942be002 Michael Hanselmann
6442 2bb5c911 Michael Hanselmann
  def CheckPrereq(self):
6443 2bb5c911 Michael Hanselmann
    """Check prerequisites.
6444 2bb5c911 Michael Hanselmann

6445 2bb5c911 Michael Hanselmann
    This checks that the instance is in the cluster.
6446 2bb5c911 Michael Hanselmann

6447 2bb5c911 Michael Hanselmann
    """
6448 e9022531 Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
6449 e9022531 Iustin Pop
    assert instance is not None, \
6450 20eca47d Iustin Pop
      "Cannot retrieve locked instance %s" % self.instance_name
6451 2bb5c911 Michael Hanselmann
6452 e9022531 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
6453 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
6454 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_INVAL)
6455 a8083063 Iustin Pop
6456 e9022531 Iustin Pop
    if len(instance.secondary_nodes) != 1:
6457 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
6458 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
6459 5c983ee5 Iustin Pop
                                 len(instance.secondary_nodes),
6460 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
6461 a8083063 Iustin Pop
6462 e9022531 Iustin Pop
    secondary_node = instance.secondary_nodes[0]
6463 a9e0c397 Iustin Pop
6464 2bb5c911 Michael Hanselmann
    if self.iallocator_name is None:
6465 2bb5c911 Michael Hanselmann
      remote_node = self.remote_node
6466 2bb5c911 Michael Hanselmann
    else:
6467 2bb5c911 Michael Hanselmann
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
6468 e9022531 Iustin Pop
                                       instance.name, instance.secondary_nodes)
6469 b6e82a65 Iustin Pop
6470 a9e0c397 Iustin Pop
    if remote_node is not None:
6471 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
6472 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
6473 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
6474 a9e0c397 Iustin Pop
    else:
6475 a9e0c397 Iustin Pop
      self.remote_node_info = None
6476 2bb5c911 Michael Hanselmann
6477 2bb5c911 Michael Hanselmann
    if remote_node == self.instance.primary_node:
6478 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
6479 5c983ee5 Iustin Pop
                                 " the instance.", errors.ECODE_INVAL)
6480 2bb5c911 Michael Hanselmann
6481 2bb5c911 Michael Hanselmann
    if remote_node == secondary_node:
6482 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
6483 5c983ee5 Iustin Pop
                                 " secondary node of the instance.",
6484 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
6485 7e9366f7 Iustin Pop
6486 2945fd2d Michael Hanselmann
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
6487 2945fd2d Michael Hanselmann
                                    constants.REPLACE_DISK_CHG):
6488 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
6489 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
6490 942be002 Michael Hanselmann
6491 2945fd2d Michael Hanselmann
    if self.mode == constants.REPLACE_DISK_AUTO:
6492 e9022531 Iustin Pop
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
6493 942be002 Michael Hanselmann
      faulty_secondary = self._FindFaultyDisks(secondary_node)
6494 942be002 Michael Hanselmann
6495 942be002 Michael Hanselmann
      if faulty_primary and faulty_secondary:
6496 942be002 Michael Hanselmann
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
6497 942be002 Michael Hanselmann
                                   " one node and can not be repaired"
6498 5c983ee5 Iustin Pop
                                   " automatically" % self.instance_name,
6499 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
6500 942be002 Michael Hanselmann
6501 942be002 Michael Hanselmann
      if faulty_primary:
6502 942be002 Michael Hanselmann
        self.disks = faulty_primary
6503 e9022531 Iustin Pop
        self.target_node = instance.primary_node
6504 942be002 Michael Hanselmann
        self.other_node = secondary_node
6505 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6506 942be002 Michael Hanselmann
      elif faulty_secondary:
6507 942be002 Michael Hanselmann
        self.disks = faulty_secondary
6508 942be002 Michael Hanselmann
        self.target_node = secondary_node
6509 e9022531 Iustin Pop
        self.other_node = instance.primary_node
6510 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6511 942be002 Michael Hanselmann
      else:
6512 942be002 Michael Hanselmann
        self.disks = []
6513 942be002 Michael Hanselmann
        check_nodes = []
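        # nothing is faulty: self.disks stays empty, so Exec() will
        # return early with "No disks need replacement"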
6514 942be002 Michael Hanselmann
6515 942be002 Michael Hanselmann
    else:
6516 942be002 Michael Hanselmann
      # Non-automatic modes
6517 942be002 Michael Hanselmann
      if self.mode == constants.REPLACE_DISK_PRI:
6518 e9022531 Iustin Pop
        self.target_node = instance.primary_node
6519 942be002 Michael Hanselmann
        self.other_node = secondary_node
6520 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6521 7e9366f7 Iustin Pop
6522 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_SEC:
6523 942be002 Michael Hanselmann
        self.target_node = secondary_node
6524 e9022531 Iustin Pop
        self.other_node = instance.primary_node
6525 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6526 a9e0c397 Iustin Pop
6527 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_CHG:
6528 942be002 Michael Hanselmann
        self.new_node = remote_node
6529 e9022531 Iustin Pop
        self.other_node = instance.primary_node
6530 942be002 Michael Hanselmann
        self.target_node = secondary_node
6531 942be002 Michael Hanselmann
        check_nodes = [self.new_node, self.other_node]
6532 54155f52 Iustin Pop
6533 942be002 Michael Hanselmann
        _CheckNodeNotDrained(self.lu, remote_node)
6534 a8083063 Iustin Pop
6535 942be002 Michael Hanselmann
      else:
6536 942be002 Michael Hanselmann
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
6537 942be002 Michael Hanselmann
                                     self.mode)
6538 942be002 Michael Hanselmann
6539 942be002 Michael Hanselmann
      # If not specified all disks should be replaced
6540 942be002 Michael Hanselmann
      if not self.disks:
6541 942be002 Michael Hanselmann
        self.disks = range(len(self.instance.disks))
6542 a9e0c397 Iustin Pop
6543 2bb5c911 Michael Hanselmann
    for node in check_nodes:
6544 2bb5c911 Michael Hanselmann
      _CheckNodeOnline(self.lu, node)
6545 e4376078 Iustin Pop
6546 2bb5c911 Michael Hanselmann
    # Check whether disks are valid
6547 2bb5c911 Michael Hanselmann
    for disk_idx in self.disks:
6548 e9022531 Iustin Pop
      instance.FindDisk(disk_idx)
6549 e4376078 Iustin Pop
6550 2bb5c911 Michael Hanselmann
    # Get secondary node IP addresses
6551 2bb5c911 Michael Hanselmann
    node_2nd_ip = {}
6552 e4376078 Iustin Pop
6553 2bb5c911 Michael Hanselmann
    for node_name in [self.target_node, self.other_node, self.new_node]:
6554 2bb5c911 Michael Hanselmann
      if node_name is not None:
6555 2bb5c911 Michael Hanselmann
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
6556 e4376078 Iustin Pop
6557 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = node_2nd_ip
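    # The secondary (replication) IPs are needed by the secondary-replace
    # path below to disconnect and re-attach the DRBD network.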
6558 a9e0c397 Iustin Pop
6559 c68174b6 Michael Hanselmann
  def Exec(self, feedback_fn):
6560 2bb5c911 Michael Hanselmann
    """Execute disk replacement.
6561 2bb5c911 Michael Hanselmann

6562 2bb5c911 Michael Hanselmann
    This dispatches the disk replacement to the appropriate handler.
6563 cff90b79 Iustin Pop

6564 a9e0c397 Iustin Pop
    """
6565 942be002 Michael Hanselmann
    if not self.disks:
6566 942be002 Michael Hanselmann
      feedback_fn("No disks need replacement")
6567 942be002 Michael Hanselmann
      return
6568 942be002 Michael Hanselmann
6569 942be002 Michael Hanselmann
    feedback_fn("Replacing disk(s) %s for %s" %
6570 942be002 Michael Hanselmann
                (", ".join([str(i) for i in self.disks]), self.instance.name))
6571 7ffc5a86 Michael Hanselmann
6572 2bb5c911 Michael Hanselmann
    activate_disks = (not self.instance.admin_up)
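    # admin_up is False when the instance is configured down, in which
    # case its disks are normally not active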
6573 2bb5c911 Michael Hanselmann
6574 2bb5c911 Michael Hanselmann
    # Activate the instance disks if we're replacing them on a down instance
6575 2bb5c911 Michael Hanselmann
    if activate_disks:
6576 2bb5c911 Michael Hanselmann
      _StartInstanceDisks(self.lu, self.instance, True)
6577 2bb5c911 Michael Hanselmann
6578 2bb5c911 Michael Hanselmann
    try:
6579 942be002 Michael Hanselmann
      # Should we replace the secondary node?
6580 942be002 Michael Hanselmann
      if self.new_node is not None:
6581 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8Secondary
6582 2bb5c911 Michael Hanselmann
      else:
6583 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8DiskOnly
6584 a4eae71f Michael Hanselmann
6585 a4eae71f Michael Hanselmann
      return fn(feedback_fn)
6586 2bb5c911 Michael Hanselmann
6587 2bb5c911 Michael Hanselmann
    finally:
6588 5c983ee5 Iustin Pop
      # Deactivate the instance disks if we're replacing them on a
6589 5c983ee5 Iustin Pop
      # down instance
6590 2bb5c911 Michael Hanselmann
      if activate_disks:
6591 2bb5c911 Michael Hanselmann
        _SafeShutdownInstanceDisks(self.lu, self.instance)
6592 2bb5c911 Michael Hanselmann
6593 2bb5c911 Michael Hanselmann
  def _CheckVolumeGroup(self, nodes):
6594 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Checking volume groups")
6595 2bb5c911 Michael Hanselmann
6596 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
6597 cff90b79 Iustin Pop
6598 2bb5c911 Michael Hanselmann
    # Make sure volume group exists on all involved nodes
6599 2bb5c911 Michael Hanselmann
    results = self.rpc.call_vg_list(nodes)
6600 cff90b79 Iustin Pop
    if not results:
6601 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
6602 2bb5c911 Michael Hanselmann
6603 2bb5c911 Michael Hanselmann
    for node in nodes:
6604 781de953 Iustin Pop
      res = results[node]
6605 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
6606 2bb5c911 Michael Hanselmann
      if vgname not in res.payload:
6607 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
6608 2bb5c911 Michael Hanselmann
                                 (vgname, node))
6609 2bb5c911 Michael Hanselmann
6610 2bb5c911 Michael Hanselmann
  def _CheckDisksExistence(self, nodes):
6611 2bb5c911 Michael Hanselmann
    # Check disk existence
6612 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
6613 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
6614 cff90b79 Iustin Pop
        continue
6615 2bb5c911 Michael Hanselmann
6616 2bb5c911 Michael Hanselmann
      for node in nodes:
6617 2bb5c911 Michael Hanselmann
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
6618 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(dev, node)
6619 2bb5c911 Michael Hanselmann
6620 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
6621 2bb5c911 Michael Hanselmann
6622 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6623 2bb5c911 Michael Hanselmann
        if msg or not result.payload:
6624 2bb5c911 Michael Hanselmann
          if not msg:
6625 2bb5c911 Michael Hanselmann
            msg = "disk not found"
6626 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
6627 23829f6f Iustin Pop
                                   (idx, node, msg))
6628 cff90b79 Iustin Pop
6629 2bb5c911 Michael Hanselmann
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
6630 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
6631 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
6632 cff90b79 Iustin Pop
        continue
6633 cff90b79 Iustin Pop
6634 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
6635 2bb5c911 Michael Hanselmann
                      (idx, node_name))
6636 2bb5c911 Michael Hanselmann
6637 2bb5c911 Michael Hanselmann
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
6638 2bb5c911 Michael Hanselmann
                                   ldisk=ldisk):
6639 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
6640 2bb5c911 Michael Hanselmann
                                 " replace disks for instance %s" %
6641 2bb5c911 Michael Hanselmann
                                 (node_name, self.instance.name))
6642 2bb5c911 Michael Hanselmann
6643 2bb5c911 Michael Hanselmann
  def _CreateNewStorage(self, node_name):
6644 2bb5c911 Michael Hanselmann
    vgname = self.cfg.GetVGName()
6645 2bb5c911 Michael Hanselmann
    iv_names = {}
6646 2bb5c911 Michael Hanselmann
6647 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
6648 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
6649 a9e0c397 Iustin Pop
        continue
6650 2bb5c911 Michael Hanselmann
6651 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
6652 2bb5c911 Michael Hanselmann
6653 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
6654 2bb5c911 Michael Hanselmann
6655 2bb5c911 Michael Hanselmann
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
6656 2bb5c911 Michael Hanselmann
      names = _GenerateUniqueNames(self.lu, lv_names)
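      # each DRBD8 disk is backed by a pair of LVs: a data LV of the
      # disk's size plus a small (128 MiB) metadata LV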
6657 2bb5c911 Michael Hanselmann
6658 2bb5c911 Michael Hanselmann
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
6659 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
6660 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
6661 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
6662 2bb5c911 Michael Hanselmann
6663 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
6664 a9e0c397 Iustin Pop
      old_lvs = dev.children
6665 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
6666 2bb5c911 Michael Hanselmann
6667 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
6668 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
6669 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
6670 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
6671 2bb5c911 Michael Hanselmann
6672 2bb5c911 Michael Hanselmann
    return iv_names
6673 2bb5c911 Michael Hanselmann
6674 2bb5c911 Michael Hanselmann
  def _CheckDevices(self, node_name, iv_names):
6675 2bb5c911 Michael Hanselmann
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
6676 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
6677 2bb5c911 Michael Hanselmann
6678 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_find(node_name, dev)
6679 2bb5c911 Michael Hanselmann
6680 2bb5c911 Michael Hanselmann
      msg = result.fail_msg
6681 2bb5c911 Michael Hanselmann
      if msg or not result.payload:
6682 2bb5c911 Michael Hanselmann
        if not msg:
6683 2bb5c911 Michael Hanselmann
          msg = "disk not found"
6684 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
6685 2bb5c911 Michael Hanselmann
                                 (name, msg))
6686 2bb5c911 Michael Hanselmann
6687 96acbc09 Michael Hanselmann
      if result.payload.is_degraded:
6688 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
6689 2bb5c911 Michael Hanselmann
6690 2bb5c911 Michael Hanselmann
  def _RemoveOldStorage(self, node_name, iv_names):
6691 2bb5c911 Michael Hanselmann
    for name, (dev, old_lvs, _) in iv_names.iteritems():
6692 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Remove logical volumes for %s" % name)
6693 2bb5c911 Michael Hanselmann
6694 2bb5c911 Michael Hanselmann
      for lv in old_lvs:
6695 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(lv, node_name)
6696 2bb5c911 Michael Hanselmann
6697 2bb5c911 Michael Hanselmann
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
6698 2bb5c911 Michael Hanselmann
        if msg:
6699 2bb5c911 Michael Hanselmann
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
6700 2bb5c911 Michael Hanselmann
                             hint="remove unused LVs manually")
6701 2bb5c911 Michael Hanselmann
6702 a4eae71f Michael Hanselmann
  def _ExecDrbd8DiskOnly(self, feedback_fn):
6703 2bb5c911 Michael Hanselmann
    """Replace a disk on the primary or secondary for DRBD 8.
6704 2bb5c911 Michael Hanselmann

6705 2bb5c911 Michael Hanselmann
    The algorithm for replace is quite complicated:
6706 2bb5c911 Michael Hanselmann

6707 2bb5c911 Michael Hanselmann
      1. for each disk to be replaced:
6708 2bb5c911 Michael Hanselmann

6709 2bb5c911 Michael Hanselmann
        1. create new LVs on the target node with unique names
6710 2bb5c911 Michael Hanselmann
        1. detach old LVs from the drbd device
6711 2bb5c911 Michael Hanselmann
        1. rename old LVs to name_replaced.<time_t>
6712 2bb5c911 Michael Hanselmann
        1. rename new LVs to old LVs
6713 2bb5c911 Michael Hanselmann
        1. attach the new LVs (with the old names now) to the drbd device
6714 2bb5c911 Michael Hanselmann

6715 2bb5c911 Michael Hanselmann
      1. wait for sync across all devices
6716 2bb5c911 Michael Hanselmann

6717 2bb5c911 Michael Hanselmann
      1. for each modified disk:
6718 2bb5c911 Michael Hanselmann

6719 2bb5c911 Michael Hanselmann
        1. remove old LVs (which have the name name_replaced.<time_t>)
6720 2bb5c911 Michael Hanselmann

6721 2bb5c911 Michael Hanselmann
    Failures are not very well handled.
6722 2bb5c911 Michael Hanselmann

6723 2bb5c911 Michael Hanselmann
    """
6724 2bb5c911 Michael Hanselmann
    steps_total = 6
6725 2bb5c911 Michael Hanselmann
6726 2bb5c911 Michael Hanselmann
    # Step: check device activation
6727 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
6728 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.other_node, self.target_node])
6729 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.target_node, self.other_node])
6730 2bb5c911 Michael Hanselmann
6731 2bb5c911 Michael Hanselmann
    # Step: check other node consistency
6732 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
6733 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.other_node,
6734 2bb5c911 Michael Hanselmann
                                self.other_node == self.instance.primary_node,
6735 2bb5c911 Michael Hanselmann
                                False)
6736 2bb5c911 Michael Hanselmann
6737 2bb5c911 Michael Hanselmann
    # Step: create new storage
6738 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
6739 2bb5c911 Michael Hanselmann
    iv_names = self._CreateNewStorage(self.target_node)
6740 a9e0c397 Iustin Pop
6741 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
6742 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
6743 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
6744 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
6745 2bb5c911 Michael Hanselmann
6746 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
6747 4d4a651d Michael Hanselmann
                                                     old_lvs)
6748 4c4e4e1e Iustin Pop
      result.Raise("Can't detach drbd from local storage on node"
6749 2bb5c911 Michael Hanselmann
                   " %s for device %s" % (self.target_node, dev.iv_name))
6750 cff90b79 Iustin Pop
      #dev.children = []
6751 cff90b79 Iustin Pop
      #cfg.Update(instance)
6752 a9e0c397 Iustin Pop
6753 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
6754 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
6755 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
6756 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
6757 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
6758 cff90b79 Iustin Pop
6759 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
6760 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
6761 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
6762 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
6763 2bb5c911 Michael Hanselmann
6764 2bb5c911 Michael Hanselmann
      # Build the rename list based on what LVs exist on the node
6765 2bb5c911 Michael Hanselmann
      rename_old_to_new = []
6766 cff90b79 Iustin Pop
      for to_ren in old_lvs:
6767 2bb5c911 Michael Hanselmann
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
6768 4c4e4e1e Iustin Pop
        if not result.fail_msg and result.payload:
6769 23829f6f Iustin Pop
          # device exists
6770 2bb5c911 Michael Hanselmann
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
6771 cff90b79 Iustin Pop
6772 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the old LVs on the target node")
6773 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
6774 4d4a651d Michael Hanselmann
                                             rename_old_to_new)
6775 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
6776 2bb5c911 Michael Hanselmann
6777 2bb5c911 Michael Hanselmann
      # Now we rename the new LVs to the old LVs
6778 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the new LVs on the target node")
6779 2bb5c911 Michael Hanselmann
      rename_new_to_old = [(new, old.physical_id)
6780 2bb5c911 Michael Hanselmann
                           for old, new in zip(old_lvs, new_lvs)]
6781 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
6782 4d4a651d Michael Hanselmann
                                             rename_new_to_old)
6783 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
6784 cff90b79 Iustin Pop
6785 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
6786 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
6787 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(new, self.target_node)
6788 a9e0c397 Iustin Pop
6789 cff90b79 Iustin Pop
      for disk in old_lvs:
6790 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
6791 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(disk, self.target_node)
6792 a9e0c397 Iustin Pop
6793 2bb5c911 Michael Hanselmann
      # Now that the new lvs have the old name, we can add them to the device
6794 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
6795 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
6796 4d4a651d Michael Hanselmann
                                                  new_lvs)
6797 4c4e4e1e Iustin Pop
      msg = result.fail_msg
6798 2cc1da8b Iustin Pop
      if msg:
6799 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
6800 4d4a651d Michael Hanselmann
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
6801 4d4a651d Michael Hanselmann
                                               new_lv).fail_msg
6802 4c4e4e1e Iustin Pop
          if msg2:
6803 2bb5c911 Michael Hanselmann
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
6804 2bb5c911 Michael Hanselmann
                               hint=("cleanup manually the unused logical"
6805 2bb5c911 Michael Hanselmann
                                     "volumes"))
6806 2cc1da8b Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
6807 a9e0c397 Iustin Pop
6808 a9e0c397 Iustin Pop
      dev.children = new_lvs
6809 a9e0c397 Iustin Pop
6810 a4eae71f Michael Hanselmann
      self.cfg.Update(self.instance, feedback_fn)
6811 a9e0c397 Iustin Pop
6812 2bb5c911 Michael Hanselmann
    # Wait for sync
6813 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
6814 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
6815 2bb5c911 Michael Hanselmann
    self.lu.LogStep(5, steps_total, "Sync devices")
6816 2bb5c911 Michael Hanselmann
    _WaitForSync(self.lu, self.instance, unlock=True)
6817 a9e0c397 Iustin Pop
6818 2bb5c911 Michael Hanselmann
    # Check all devices manually
6819 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
6820 a9e0c397 Iustin Pop
6821 cff90b79 Iustin Pop
    # Step: remove old storage
6822 2bb5c911 Michael Hanselmann
    self.lu.LogStep(6, steps_total, "Removing old storage")
6823 2bb5c911 Michael Hanselmann
    self._RemoveOldStorage(self.target_node, iv_names)
6824 a9e0c397 Iustin Pop
6825 a4eae71f Michael Hanselmann
  def _ExecDrbd8Secondary(self, feedback_fn):
6826 2bb5c911 Michael Hanselmann
    """Replace the secondary node for DRBD 8.
6827 a9e0c397 Iustin Pop

6828 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
6829 a9e0c397 Iustin Pop
      - for all disks of the instance:
6830 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
6831 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
6832 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
6833 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
6834 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
6835 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
6836 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
6837 a9e0c397 Iustin Pop
          not network enabled
6838 a9e0c397 Iustin Pop
      - wait for sync across all devices
6839 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
6840 a9e0c397 Iustin Pop

6841 a9e0c397 Iustin Pop
    Failures are not very well handled.
6842 0834c866 Iustin Pop

6843 a9e0c397 Iustin Pop
    """
6844 0834c866 Iustin Pop
    steps_total = 6
6845 0834c866 Iustin Pop
6846 0834c866 Iustin Pop
    # Step: check device activation
6847 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
6848 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.instance.primary_node])
6849 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.instance.primary_node])
6850 0834c866 Iustin Pop
6851 0834c866 Iustin Pop
    # Step: check other node consistency
6852 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
6853 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
6854 0834c866 Iustin Pop
6855 0834c866 Iustin Pop
    # Step: create new storage
6856 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
6857 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
6858 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
6859 2bb5c911 Michael Hanselmann
                      (self.new_node, idx))
6860 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
6861 a9e0c397 Iustin Pop
      for new_lv in dev.children:
6862 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
6863 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
6864 a9e0c397 Iustin Pop
6865 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
6866 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
6867 a1578d63 Iustin Pop
    # error and the success paths
6868 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
6869 4d4a651d Michael Hanselmann
    minors = self.cfg.AllocateDRBDMinor([self.new_node
6870 4d4a651d Michael Hanselmann
                                         for dev in self.instance.disks],
6871 2bb5c911 Michael Hanselmann
                                        self.instance.name)
6872 2bb5c911 Michael Hanselmann
    logging.debug("Allocated minors %r" % (minors,))
6873 2bb5c911 Michael Hanselmann
6874 2bb5c911 Michael Hanselmann
    iv_names = {}
6875 2bb5c911 Michael Hanselmann
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
6876 4d4a651d Michael Hanselmann
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
6877 4d4a651d Michael Hanselmann
                      (self.new_node, idx))
6878 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
6879 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
6880 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
6881 a2d59d8b Iustin Pop
      # with network, for the latter activation in step 4
6882 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
6883 2bb5c911 Michael Hanselmann
      if self.instance.primary_node == o_node1:
6884 a2d59d8b Iustin Pop
        p_minor = o_minor1
6885 ffa1c0dc Iustin Pop
      else:
6886 a2d59d8b Iustin Pop
        p_minor = o_minor2
6887 a2d59d8b Iustin Pop
6888 4d4a651d Michael Hanselmann
      new_alone_id = (self.instance.primary_node, self.new_node, None,
6889 4d4a651d Michael Hanselmann
                      p_minor, new_minor, o_secret)
6890 4d4a651d Michael Hanselmann
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
6891 4d4a651d Michael Hanselmann
                    p_minor, new_minor, o_secret)
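      # new_alone_id deliberately carries no port, so the new drbd comes
      # up standalone (without networking) on the new node; new_net_id,
      # which includes the port, is kept in iv_names and becomes the
      # disk's logical_id before the network attach further down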
6892 a2d59d8b Iustin Pop
6893 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
6894 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
6895 a2d59d8b Iustin Pop
                    new_net_id)
6896 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
6897 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
6898 8a6c7011 Iustin Pop
                              children=dev.children,
6899 8a6c7011 Iustin Pop
                              size=dev.size)
6900 796cab27 Iustin Pop
      try:
6901 2bb5c911 Michael Hanselmann
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
6902 2bb5c911 Michael Hanselmann
                              _GetInstanceInfoText(self.instance), False)
6903 82759cb1 Iustin Pop
      except errors.GenericError:
6904 2bb5c911 Michael Hanselmann
        self.cfg.ReleaseDRBDMinors(self.instance.name)
6905 796cab27 Iustin Pop
        raise
6906 a9e0c397 Iustin Pop
6907 2bb5c911 Michael Hanselmann
    # We have new devices, shutdown the drbd on the old secondary
6908 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
6909 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
6910 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.target_node)
6911 2bb5c911 Michael Hanselmann
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
6912 cacfd1fd Iustin Pop
      if msg:
6913 2bb5c911 Michael Hanselmann
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
6914 2bb5c911 Michael Hanselmann
                           "node: %s" % (idx, msg),
6915 2bb5c911 Michael Hanselmann
                           hint=("Please cleanup this device manually as"
6916 2bb5c911 Michael Hanselmann
                                 " soon as possible"))
6917 a9e0c397 Iustin Pop
6918 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
6919 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
6920 4d4a651d Michael Hanselmann
                                               self.node_secondary_ip,
6921 4d4a651d Michael Hanselmann
                                               self.instance.disks)\
6922 4d4a651d Michael Hanselmann
                                              [self.instance.primary_node]
6923 642445d9 Iustin Pop
6924 4c4e4e1e Iustin Pop
    msg = result.fail_msg
6925 a2d59d8b Iustin Pop
    if msg:
6926 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
6927 2bb5c911 Michael Hanselmann
      self.cfg.ReleaseDRBDMinors(self.instance.name)
6928 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
6929 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
6930 642445d9 Iustin Pop
6931 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
6932 642445d9 Iustin Pop
    # the instance to point to the new secondary
6933 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Updating instance configuration")
6934 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
6935 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
6936 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.instance.primary_node)
6937 2bb5c911 Michael Hanselmann
6938 a4eae71f Michael Hanselmann
    self.cfg.Update(self.instance, feedback_fn)
6939 a9e0c397 Iustin Pop
6940 642445d9 Iustin Pop
    # and now perform the drbd attach
6941 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Attaching primary drbds to new secondary"
6942 2bb5c911 Michael Hanselmann
                    " (standalone => connected)")
6943 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
6944 4d4a651d Michael Hanselmann
                                            self.new_node],
6945 4d4a651d Michael Hanselmann
                                           self.node_secondary_ip,
6946 4d4a651d Michael Hanselmann
                                           self.instance.disks,
6947 4d4a651d Michael Hanselmann
                                           self.instance.name,
6948 a2d59d8b Iustin Pop
                                           False)
6949 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
6950 4c4e4e1e Iustin Pop
      msg = to_result.fail_msg
6951 a2d59d8b Iustin Pop
      if msg:
6952 4d4a651d Michael Hanselmann
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
6953 4d4a651d Michael Hanselmann
                           to_node, msg,
6954 2bb5c911 Michael Hanselmann
                           hint=("please do a gnt-instance info to see the"
6955 2bb5c911 Michael Hanselmann
                                 " status of disks"))
6956 a9e0c397 Iustin Pop
6957 2bb5c911 Michael Hanselmann
    # Wait for sync
6958 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
6959 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
6960 2bb5c911 Michael Hanselmann
    self.lu.LogStep(5, steps_total, "Sync devices")
6961 2bb5c911 Michael Hanselmann
    _WaitForSync(self.lu, self.instance, unlock=True)
6962 a9e0c397 Iustin Pop
6963 2bb5c911 Michael Hanselmann
    # Check all devices manually
6964 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
6965 22985314 Guido Trotter
6966 2bb5c911 Michael Hanselmann
    # Step: remove old storage
6967 2bb5c911 Michael Hanselmann
    self.lu.LogStep(6, steps_total, "Removing old storage")
6968 2bb5c911 Michael Hanselmann
    self._RemoveOldStorage(self.target_node, iv_names)
6969 a9e0c397 Iustin Pop
6970 a8083063 Iustin Pop
6971 76aef8fc Michael Hanselmann
class LURepairNodeStorage(NoHooksLU):
6972 76aef8fc Michael Hanselmann
  """Repairs the volume group on a node.
6973 76aef8fc Michael Hanselmann

6974 76aef8fc Michael Hanselmann
  """
6975 76aef8fc Michael Hanselmann
  _OP_REQP = ["node_name"]
6976 76aef8fc Michael Hanselmann
  REQ_BGL = False
6977 76aef8fc Michael Hanselmann
6978 76aef8fc Michael Hanselmann
  def CheckArguments(self):
6979 76aef8fc Michael Hanselmann
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
6980 76aef8fc Michael Hanselmann
    if node_name is None:
6981 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name,
6982 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
6983 76aef8fc Michael Hanselmann
6984 76aef8fc Michael Hanselmann
    self.op.node_name = node_name
6985 76aef8fc Michael Hanselmann
6986 76aef8fc Michael Hanselmann
  def ExpandNames(self):
6987 76aef8fc Michael Hanselmann
    self.needed_locks = {
6988 76aef8fc Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
6989 76aef8fc Michael Hanselmann
      }
6990 76aef8fc Michael Hanselmann
6991 76aef8fc Michael Hanselmann
  def _CheckFaultyDisks(self, instance, node_name):
6992 76aef8fc Michael Hanselmann
    if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
6993 76aef8fc Michael Hanselmann
                                node_name, True):
6994 76aef8fc Michael Hanselmann
      raise errors.OpPrereqError("Instance '%s' has faulty disks on"
6995 5c983ee5 Iustin Pop
                                 " node '%s'" % (instance.name, node_name),
6996 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
6997 76aef8fc Michael Hanselmann
6998 76aef8fc Michael Hanselmann
  def CheckPrereq(self):
6999 76aef8fc Michael Hanselmann
    """Check prerequisites.
7000 76aef8fc Michael Hanselmann

7001 76aef8fc Michael Hanselmann
    """
7002 76aef8fc Michael Hanselmann
    storage_type = self.op.storage_type
7003 76aef8fc Michael Hanselmann
7004 76aef8fc Michael Hanselmann
    if (constants.SO_FIX_CONSISTENCY not in
7005 76aef8fc Michael Hanselmann
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
7006 76aef8fc Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
7007 5c983ee5 Iustin Pop
                                 " repaired" % storage_type,
7008 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7009 76aef8fc Michael Hanselmann
7010 76aef8fc Michael Hanselmann
    # Check whether any instance on this node has faulty disks
7011 76aef8fc Michael Hanselmann
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
7012 76aef8fc Michael Hanselmann
      check_nodes = set(inst.all_nodes)
7013 76aef8fc Michael Hanselmann
      check_nodes.discard(self.op.node_name)
7014 76aef8fc Michael Hanselmann
      for inst_node_name in check_nodes:
7015 76aef8fc Michael Hanselmann
        self._CheckFaultyDisks(inst, inst_node_name)
7016 76aef8fc Michael Hanselmann
7017 76aef8fc Michael Hanselmann
  def Exec(self, feedback_fn):
7018 76aef8fc Michael Hanselmann
    feedback_fn("Repairing storage unit '%s' on %s ..." %
7019 76aef8fc Michael Hanselmann
                (self.op.name, self.op.node_name))
7020 76aef8fc Michael Hanselmann
7021 76aef8fc Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
7022 76aef8fc Michael Hanselmann
    result = self.rpc.call_storage_execute(self.op.node_name,
7023 76aef8fc Michael Hanselmann
                                           self.op.storage_type, st_args,
7024 76aef8fc Michael Hanselmann
                                           self.op.name,
7025 76aef8fc Michael Hanselmann
                                           constants.SO_FIX_CONSISTENCY)
7026 76aef8fc Michael Hanselmann
    result.Raise("Failed to repair storage unit '%s' on %s" %
7027 76aef8fc Michael Hanselmann
                 (self.op.name, self.op.node_name))
7028 76aef8fc Michael Hanselmann
7029 76aef8fc Michael Hanselmann
7030 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
7031 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
7032 8729e0d7 Iustin Pop

7033 8729e0d7 Iustin Pop
  """
7034 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
7035 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7036 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
7037 31e63dbf Guido Trotter
  REQ_BGL = False
7038 31e63dbf Guido Trotter
7039 31e63dbf Guido Trotter
  def ExpandNames(self):
7040 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
7041 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7042 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7043 31e63dbf Guido Trotter
7044 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
7045 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
7046 31e63dbf Guido Trotter
      self._LockInstancesNodes()
7047 8729e0d7 Iustin Pop
7048 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
7049 8729e0d7 Iustin Pop
    """Build hooks env.
7050 8729e0d7 Iustin Pop

7051 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
7052 8729e0d7 Iustin Pop

7053 8729e0d7 Iustin Pop
    """
7054 8729e0d7 Iustin Pop
    env = {
7055 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
7056 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
7057 8729e0d7 Iustin Pop
      }
7058 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7059 8729e0d7 Iustin Pop
    nl = [
7060 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
7061 8729e0d7 Iustin Pop
      self.instance.primary_node,
7062 8729e0d7 Iustin Pop
      ]
7063 8729e0d7 Iustin Pop
    return env, nl, nl
7064 8729e0d7 Iustin Pop
7065 8729e0d7 Iustin Pop
  def CheckPrereq(self):
7066 8729e0d7 Iustin Pop
    """Check prerequisites.
7067 8729e0d7 Iustin Pop

7068 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
7069 8729e0d7 Iustin Pop

7070 8729e0d7 Iustin Pop
    """
7071 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7072 31e63dbf Guido Trotter
    assert instance is not None, \
7073 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
7074 6b12959c Iustin Pop
    nodenames = list(instance.all_nodes)
7075 6b12959c Iustin Pop
    for node in nodenames:
7076 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, node)
7077 7527a8a4 Iustin Pop
7078 31e63dbf Guido Trotter
7079 8729e0d7 Iustin Pop
    self.instance = instance
7080 8729e0d7 Iustin Pop
7081 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
7082 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
7083 5c983ee5 Iustin Pop
                                 " growing.", errors.ECODE_INVAL)
7084 8729e0d7 Iustin Pop
7085 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
7086 8729e0d7 Iustin Pop
7087 72737a7f Iustin Pop
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
7088 72737a7f Iustin Pop
                                       instance.hypervisor)
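    # node_info reports the free space in the volume group as 'vg_free'
    # (in MiB); every node holding the disk must have at least
    # self.op.amount MiB available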
7089 8729e0d7 Iustin Pop
    for node in nodenames:
7090 781de953 Iustin Pop
      info = nodeinfo[node]
7091 4c4e4e1e Iustin Pop
      info.Raise("Cannot get current information from node %s" % node)
7092 070e998b Iustin Pop
      vg_free = info.payload.get('vg_free', None)
7093 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
7094 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
7095 5c983ee5 Iustin Pop
                                   " node %s" % node, errors.ECODE_ENVIRON)
7096 781de953 Iustin Pop
      if self.op.amount > vg_free:
7097 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
7098 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
7099 5c983ee5 Iustin Pop
                                   (node, vg_free, self.op.amount),
7100 5c983ee5 Iustin Pop
                                   errors.ECODE_NORES)
7101 8729e0d7 Iustin Pop
7102 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
7103 8729e0d7 Iustin Pop
    """Execute disk grow.
7104 8729e0d7 Iustin Pop

7105 8729e0d7 Iustin Pop
    """
7106 8729e0d7 Iustin Pop
    instance = self.instance
7107 ad24e046 Iustin Pop
    disk = self.disk
7108 6b12959c Iustin Pop
    for node in instance.all_nodes:
7109 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
7110 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
7111 4c4e4e1e Iustin Pop
      result.Raise("Grow request failed to node %s" % node)
7112 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
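    # RecordGrow only updates the size stored in the configuration; the
    # actual resize was done by the blockdev_grow calls above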
7113 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
7114 6605411d Iustin Pop
    if self.op.wait_for_sync:
7115 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
7116 6605411d Iustin Pop
      if disk_abort:
7117 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
7118 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
7119 8729e0d7 Iustin Pop
7120 8729e0d7 Iustin Pop
7121 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
7122 a8083063 Iustin Pop
  """Query runtime instance data.
7123 a8083063 Iustin Pop

7124 a8083063 Iustin Pop
  """
7125 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
7126 a987fa48 Guido Trotter
  REQ_BGL = False
7127 ae5849b5 Michael Hanselmann
7128 a987fa48 Guido Trotter
  def ExpandNames(self):
7129 a987fa48 Guido Trotter
    self.needed_locks = {}
7130 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
7131 a987fa48 Guido Trotter
7132 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
7133 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
7134 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7135 a987fa48 Guido Trotter
7136 a987fa48 Guido Trotter
    if self.op.instances:
7137 a987fa48 Guido Trotter
      self.wanted_names = []
7138 a987fa48 Guido Trotter
      for name in self.op.instances:
7139 a987fa48 Guido Trotter
        full_name = self.cfg.ExpandInstanceName(name)
7140 a987fa48 Guido Trotter
        if full_name is None:
7141 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Instance '%s' not known" % name,
7142 5c983ee5 Iustin Pop
                                     errors.ECODE_NOENT)
7143 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
7144 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
7145 a987fa48 Guido Trotter
    else:
7146 a987fa48 Guido Trotter
      self.wanted_names = None
7147 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
7148 a987fa48 Guido Trotter
7149 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7150 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7151 a987fa48 Guido Trotter
7152 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
7153 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
7154 a987fa48 Guido Trotter
      self._LockInstancesNodes()
7155 a8083063 Iustin Pop
7156 a8083063 Iustin Pop
  def CheckPrereq(self):
7157 a8083063 Iustin Pop
    """Check prerequisites.
7158 a8083063 Iustin Pop

7159 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
7160 a8083063 Iustin Pop

7161 a8083063 Iustin Pop
    """
7162 a987fa48 Guido Trotter
    if self.wanted_names is None:
7163 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
7164 a8083063 Iustin Pop
7165 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
7166 a987fa48 Guido Trotter
                             in self.wanted_names]
7167 a987fa48 Guido Trotter
    return
7168 a8083063 Iustin Pop
7169 98825740 Michael Hanselmann
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
7170 98825740 Michael Hanselmann
    """Returns the status of a block device
7171 98825740 Michael Hanselmann

7172 98825740 Michael Hanselmann
    """
7173 4dce1a83 Michael Hanselmann
    if self.op.static or not node:
7174 98825740 Michael Hanselmann
      return None
7175 98825740 Michael Hanselmann
7176 98825740 Michael Hanselmann
    self.cfg.SetDiskID(dev, node)
7177 98825740 Michael Hanselmann
7178 98825740 Michael Hanselmann
    result = self.rpc.call_blockdev_find(node, dev)
7179 98825740 Michael Hanselmann
    if result.offline:
7180 98825740 Michael Hanselmann
      return None
7181 98825740 Michael Hanselmann
7182 98825740 Michael Hanselmann
    result.Raise("Can't compute disk status for %s" % instance_name)
7183 98825740 Michael Hanselmann
7184 98825740 Michael Hanselmann
    status = result.payload
7185 ddfe2228 Michael Hanselmann
    if status is None:
7186 ddfe2228 Michael Hanselmann
      return None
7187 98825740 Michael Hanselmann
7188 98825740 Michael Hanselmann
    return (status.dev_path, status.major, status.minor,
7189 98825740 Michael Hanselmann
            status.sync_percent, status.estimated_time,
7190 f208978a Michael Hanselmann
            status.is_degraded, status.ldisk_status)
7191 98825740 Michael Hanselmann
7192 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
7193 a8083063 Iustin Pop
    """Compute block device status.
7194 a8083063 Iustin Pop

7195 a8083063 Iustin Pop
    """
7196 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
7197 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
7198 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
7199 a8083063 Iustin Pop
        snode = dev.logical_id[1]
7200 a8083063 Iustin Pop
      else:
7201 a8083063 Iustin Pop
        snode = dev.logical_id[0]
7202 a8083063 Iustin Pop
7203 98825740 Michael Hanselmann
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
7204 98825740 Michael Hanselmann
                                              instance.name, dev)
7205 98825740 Michael Hanselmann
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
7206 a8083063 Iustin Pop
7207 a8083063 Iustin Pop
    if dev.children:
7208 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
7209 a8083063 Iustin Pop
                      for child in dev.children]
7210 a8083063 Iustin Pop
    else:
7211 a8083063 Iustin Pop
      dev_children = []
7212 a8083063 Iustin Pop
7213 a8083063 Iustin Pop
    data = {
7214 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
7215 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
7216 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
7217 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
7218 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
7219 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
7220 a8083063 Iustin Pop
      "children": dev_children,
7221 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
7222 c98162a7 Iustin Pop
      "size": dev.size,
7223 a8083063 Iustin Pop
      }
7224 a8083063 Iustin Pop
7225 a8083063 Iustin Pop
    return data
7226 a8083063 Iustin Pop
7227 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7228 a8083063 Iustin Pop
    """Gather and return data"""
7229 a8083063 Iustin Pop
    result = {}
7230 338e51e8 Iustin Pop
7231 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
7232 338e51e8 Iustin Pop
7233 a8083063 Iustin Pop
    for instance in self.wanted_instances:
7234 57821cac Iustin Pop
      if not self.op.static:
7235 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
7236 57821cac Iustin Pop
                                                  instance.name,
7237 57821cac Iustin Pop
                                                  instance.hypervisor)
7238 4c4e4e1e Iustin Pop
        remote_info.Raise("Error checking node %s" % instance.primary_node)
7239 7ad1af4a Iustin Pop
        remote_info = remote_info.payload
7240 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
7241 57821cac Iustin Pop
          remote_state = "up"
7242 57821cac Iustin Pop
        else:
7243 57821cac Iustin Pop
          remote_state = "down"
7244 a8083063 Iustin Pop
      else:
7245 57821cac Iustin Pop
        remote_state = None
7246 0d68c45d Iustin Pop
      if instance.admin_up:
7247 a8083063 Iustin Pop
        config_state = "up"
7248 0d68c45d Iustin Pop
      else:
7249 0d68c45d Iustin Pop
        config_state = "down"
7250 a8083063 Iustin Pop
7251 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
7252 a8083063 Iustin Pop
               for device in instance.disks]
7253 a8083063 Iustin Pop
7254 a8083063 Iustin Pop
      idict = {
7255 a8083063 Iustin Pop
        "name": instance.name,
7256 a8083063 Iustin Pop
        "config_state": config_state,
7257 a8083063 Iustin Pop
        "run_state": remote_state,
7258 a8083063 Iustin Pop
        "pnode": instance.primary_node,
7259 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
7260 a8083063 Iustin Pop
        "os": instance.os,
7261 0b13832c Guido Trotter
        # this happens to be the same format used for hooks
7262 0b13832c Guido Trotter
        "nics": _NICListToTuple(self, instance.nics),
7263 a8083063 Iustin Pop
        "disks": disks,
7264 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
7265 24838135 Iustin Pop
        "network_port": instance.network_port,
7266 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
7267 338e51e8 Iustin Pop
        "hv_actual": cluster.FillHV(instance),
7268 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
7269 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
7270 90f72445 Iustin Pop
        "serial_no": instance.serial_no,
7271 90f72445 Iustin Pop
        "mtime": instance.mtime,
7272 90f72445 Iustin Pop
        "ctime": instance.ctime,
7273 033d58b0 Iustin Pop
        "uuid": instance.uuid,
7274 a8083063 Iustin Pop
        }
7275 a8083063 Iustin Pop
7276 a8083063 Iustin Pop
      result[instance.name] = idict
7277 a8083063 Iustin Pop
7278 a8083063 Iustin Pop
    return result
7279 a8083063 Iustin Pop
7280 a8083063 Iustin Pop
7281 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
7282 a8083063 Iustin Pop
  """Modifies an instances's parameters.
7283 a8083063 Iustin Pop

7284 a8083063 Iustin Pop
  """
7285 a8083063 Iustin Pop
  HPATH = "instance-modify"
7286 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7287 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
7288 1a5c7281 Guido Trotter
  REQ_BGL = False
7289 1a5c7281 Guido Trotter
7290 24991749 Iustin Pop
  def CheckArguments(self):
7291 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
7292 24991749 Iustin Pop
      self.op.nics = []
7293 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
7294 24991749 Iustin Pop
      self.op.disks = []
7295 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
7296 24991749 Iustin Pop
      self.op.beparams = {}
7297 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
7298 24991749 Iustin Pop
      self.op.hvparams = {}
7299 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
7300 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
7301 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
7302 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
7303 24991749 Iustin Pop
7304 24991749 Iustin Pop
    # Disk validation
7305 24991749 Iustin Pop
    disk_addremove = 0
7306 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
7307 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
7308 24991749 Iustin Pop
        disk_addremove += 1
7309 24991749 Iustin Pop
        continue
7310 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
7311 24991749 Iustin Pop
        disk_addremove += 1
7312 24991749 Iustin Pop
      else:
7313 24991749 Iustin Pop
        if not isinstance(disk_op, int):
7314 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
7315 8b46606c Guido Trotter
        if not isinstance(disk_dict, dict):
7316 8b46606c Guido Trotter
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
7317 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
7318 8b46606c Guido Trotter
7319 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
7320 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
7321 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
7322 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
7323 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7324 24991749 Iustin Pop
        size = disk_dict.get('size', None)
7325 24991749 Iustin Pop
        if size is None:
7326 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing",
7327 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7328 24991749 Iustin Pop
        try:
7329 24991749 Iustin Pop
          size = int(size)
7330 24991749 Iustin Pop
        except ValueError, err:
7331 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
7332 5c983ee5 Iustin Pop
                                     str(err), errors.ECODE_INVAL)
7333 24991749 Iustin Pop
        disk_dict['size'] = size
7334 24991749 Iustin Pop
      else:
7335 24991749 Iustin Pop
        # modification of disk
7336 24991749 Iustin Pop
        if 'size' in disk_dict:
7337 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
7338 5c983ee5 Iustin Pop
                                     " grow-disk", errors.ECODE_INVAL)
7339 24991749 Iustin Pop
7340 24991749 Iustin Pop
    if disk_addremove > 1:
7341 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
7342 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
7343 24991749 Iustin Pop
7344 24991749 Iustin Pop
    # NIC validation
7345 24991749 Iustin Pop
    nic_addremove = 0
7346 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
7347 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
7348 24991749 Iustin Pop
        nic_addremove += 1
7349 24991749 Iustin Pop
        continue
7350 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
7351 24991749 Iustin Pop
        nic_addremove += 1
7352 24991749 Iustin Pop
      else:
7353 24991749 Iustin Pop
        if not isinstance(nic_op, int):
7354 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
7355 8b46606c Guido Trotter
        if not isinstance(nic_dict, dict):
7356 8b46606c Guido Trotter
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
7357 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
7358 24991749 Iustin Pop
7359 24991749 Iustin Pop
      # nic_dict should be a dict
7360 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
7361 24991749 Iustin Pop
      if nic_ip is not None:
7362 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
7363 24991749 Iustin Pop
          nic_dict['ip'] = None
7364 24991749 Iustin Pop
        else:
7365 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
7366 5c983ee5 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
7367 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
7368 5c44da6a Guido Trotter
7369 cd098c41 Guido Trotter
      nic_bridge = nic_dict.get('bridge', None)
7370 cd098c41 Guido Trotter
      nic_link = nic_dict.get('link', None)
7371 cd098c41 Guido Trotter
      if nic_bridge and nic_link:
7372 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
7373 5c983ee5 Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
7374 cd098c41 Guido Trotter
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
7375 cd098c41 Guido Trotter
        nic_dict['bridge'] = None
7376 cd098c41 Guido Trotter
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
7377 cd098c41 Guido Trotter
        nic_dict['link'] = None
7378 cd098c41 Guido Trotter
7379 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
7380 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
7381 5c44da6a Guido Trotter
        if nic_mac is None:
7382 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
7383 5c44da6a Guido Trotter
7384 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
7385 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
7386 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7387 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
7388 5c983ee5 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac,
7389 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
7390 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
7391 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
7392 5c983ee5 Iustin Pop
                                     " modifying an existing nic",
7393 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7394 5c44da6a Guido Trotter
7395 24991749 Iustin Pop
    if nic_addremove > 1:
7396 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
7397 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
7398 24991749 Iustin Pop
7399 1a5c7281 Guido Trotter
  def ExpandNames(self):
7400 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
7401 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
7402 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7403 74409b12 Iustin Pop
7404 74409b12 Iustin Pop
  def DeclareLocks(self, level):
7405 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
7406 74409b12 Iustin Pop
      self._LockInstancesNodes()
7407 a8083063 Iustin Pop
7408 a8083063 Iustin Pop
  def BuildHooksEnv(self):
7409 a8083063 Iustin Pop
    """Build hooks env.
7410 a8083063 Iustin Pop

7411 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
7412 a8083063 Iustin Pop

7413 a8083063 Iustin Pop
    """
7414 396e1b78 Michael Hanselmann
    args = dict()
7415 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
7416 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
7417 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
7418 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
7419 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
7420 d8dcf3c9 Guido Trotter
    # information at all.
7421 d8dcf3c9 Guido Trotter
    if self.op.nics:
7422 d8dcf3c9 Guido Trotter
      args['nics'] = []
7423 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
7424 62f0dd02 Guido Trotter
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
7425 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
7426 d8dcf3c9 Guido Trotter
        if idx in nic_override:
7427 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
7428 d8dcf3c9 Guido Trotter
        else:
7429 d8dcf3c9 Guido Trotter
          this_nic_override = {}
7430 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
7431 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
7432 d8dcf3c9 Guido Trotter
        else:
7433 d8dcf3c9 Guido Trotter
          ip = nic.ip
7434 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
7435 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
7436 d8dcf3c9 Guido Trotter
        else:
7437 d8dcf3c9 Guido Trotter
          mac = nic.mac
7438 62f0dd02 Guido Trotter
        if idx in self.nic_pnew:
7439 62f0dd02 Guido Trotter
          nicparams = self.nic_pnew[idx]
7440 62f0dd02 Guido Trotter
        else:
7441 62f0dd02 Guido Trotter
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
7442 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
7443 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
7444 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
7445 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
7446 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
7447 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
7448 62f0dd02 Guido Trotter
        nicparams = self.nic_pnew[constants.DDM_ADD]
7449 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
7450 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
7451 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
7452 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
7453 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
7454 d8dcf3c9 Guido Trotter
7455 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
7456 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7457 a8083063 Iustin Pop
    return env, nl, nl
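    # Illustrative sketch only (address, MAC and link are made up): each
    # entry appended to args['nics'] above is an (ip, mac, mode, link)
    # tuple, e.g.
    #   ('192.0.2.10', 'aa:00:00:12:34:56',
    #    constants.NIC_MODE_BRIDGED, 'xen-br0')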
7458 a8083063 Iustin Pop
7459 0329617a Guido Trotter
  def _GetUpdatedParams(self, old_params, update_dict,
7460 0329617a Guido Trotter
                        default_values, parameter_types):
7461 0329617a Guido Trotter
    """Return the new params dict for the given params.
7462 0329617a Guido Trotter

7463 0329617a Guido Trotter
    @type old_params: dict
7464 f2fd87d7 Iustin Pop
    @param old_params: old parameters
7465 0329617a Guido Trotter
    @type update_dict: dict
7466 f2fd87d7 Iustin Pop
    @param update_dict: dict containing new parameter values,
7467 f2fd87d7 Iustin Pop
                        or constants.VALUE_DEFAULT to reset the
7468 f2fd87d7 Iustin Pop
                        parameter to its default value
7469 0329617a Guido Trotter
    @type default_values: dict
7470 0329617a Guido Trotter
    @param default_values: default values for the filled parameters
7471 0329617a Guido Trotter
    @type parameter_types: dict
7472 0329617a Guido Trotter
    @param parameter_types: dict mapping target dict keys to types
7473 0329617a Guido Trotter
                            in constants.ENFORCEABLE_TYPES
7474 0329617a Guido Trotter
    @rtype: (dict, dict)
7475 0329617a Guido Trotter
    @return: (new_parameters, filled_parameters)
7476 0329617a Guido Trotter

7477 0329617a Guido Trotter
    """
7478 0329617a Guido Trotter
    params_copy = copy.deepcopy(old_params)
7479 0329617a Guido Trotter
    for key, val in update_dict.iteritems():
7480 0329617a Guido Trotter
      if val == constants.VALUE_DEFAULT:
7481 0329617a Guido Trotter
        try:
7482 0329617a Guido Trotter
          del params_copy[key]
7483 0329617a Guido Trotter
        except KeyError:
7484 0329617a Guido Trotter
          pass
7485 0329617a Guido Trotter
      else:
7486 0329617a Guido Trotter
        params_copy[key] = val
7487 0329617a Guido Trotter
    utils.ForceDictType(params_copy, parameter_types)
7488 0329617a Guido Trotter
    params_filled = objects.FillDict(default_values, params_copy)
7489 0329617a Guido Trotter
    return (params_copy, params_filled)
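    # A minimal illustration of the merge semantics above (parameter names
    # and values are hypothetical):
    #   old_params  = {'acpi': True, 'kernel_args': 'ro'}
    #   update_dict = {'acpi': constants.VALUE_DEFAULT, 'pae': True}
    # gives params_copy == {'kernel_args': 'ro', 'pae': True} (the 'acpi'
    # override is dropped so the cluster default applies again), while
    # params_filled is default_values overlaid with params_copy.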
7490 0329617a Guido Trotter
7491 a8083063 Iustin Pop
  def CheckPrereq(self):
7492 a8083063 Iustin Pop
    """Check prerequisites.
7493 a8083063 Iustin Pop

7494 a8083063 Iustin Pop
    This checks the requested parameter changes against the current
    configuration of the instance and the cluster.
7495 a8083063 Iustin Pop

7496 a8083063 Iustin Pop
    """
7497 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
7498 a8083063 Iustin Pop
7499 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
7500 31a853d2 Iustin Pop
7501 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7502 2ee88aeb Guido Trotter
    cluster = self.cluster = self.cfg.GetClusterInfo()
7503 1a5c7281 Guido Trotter
    assert self.instance is not None, \
7504 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
7505 6b12959c Iustin Pop
    pnode = instance.primary_node
7506 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
7507 74409b12 Iustin Pop
7508 338e51e8 Iustin Pop
    # hvparams processing
7509 74409b12 Iustin Pop
    if self.op.hvparams:
7510 0329617a Guido Trotter
      i_hvdict, hv_new = self._GetUpdatedParams(
7511 0329617a Guido Trotter
                             instance.hvparams, self.op.hvparams,
7512 0329617a Guido Trotter
                             cluster.hvparams[instance.hypervisor],
7513 0329617a Guido Trotter
                             constants.HVS_PARAMETER_TYPES)
7514 74409b12 Iustin Pop
      # local check
7515 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
7516 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
7517 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
7518 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
7519 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
7520 338e51e8 Iustin Pop
    else:
7521 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
7522 338e51e8 Iustin Pop
7523 338e51e8 Iustin Pop
    # beparams processing
7524 338e51e8 Iustin Pop
    if self.op.beparams:
7525 0329617a Guido Trotter
      i_bedict, be_new = self._GetUpdatedParams(
7526 0329617a Guido Trotter
                             instance.beparams, self.op.beparams,
7527 0329617a Guido Trotter
                             cluster.beparams[constants.PP_DEFAULT],
7528 0329617a Guido Trotter
                             constants.BES_PARAMETER_TYPES)
7529 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
7530 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
7531 338e51e8 Iustin Pop
    else:
7532 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
7533 74409b12 Iustin Pop
7534 cfefe007 Guido Trotter
    self.warn = []
7535 647a5d80 Iustin Pop
7536 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
7537 647a5d80 Iustin Pop
      mem_check_list = [pnode]
7538 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
7539 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
7540 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
7541 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
7542 72737a7f Iustin Pop
                                                  instance.hypervisor)
7543 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
7544 72737a7f Iustin Pop
                                         instance.hypervisor)
7545 070e998b Iustin Pop
      pninfo = nodeinfo[pnode]
7546 4c4e4e1e Iustin Pop
      msg = pninfo.fail_msg
7547 070e998b Iustin Pop
      if msg:
7548 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
7549 070e998b Iustin Pop
        self.warn.append("Can't get info from primary node %s: %s" %
7550 070e998b Iustin Pop
                         (pnode,  msg))
7551 070e998b Iustin Pop
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
7552 070e998b Iustin Pop
        self.warn.append("Node data from primary node %s doesn't contain"
7553 070e998b Iustin Pop
                         " free memory information" % pnode)
7554 4c4e4e1e Iustin Pop
      elif instance_info.fail_msg:
7555 7ad1af4a Iustin Pop
        self.warn.append("Can't get instance runtime information: %s" %
7556 4c4e4e1e Iustin Pop
                        instance_info.fail_msg)
7557 cfefe007 Guido Trotter
      else:
7558 7ad1af4a Iustin Pop
        if instance_info.payload:
7559 7ad1af4a Iustin Pop
          current_mem = int(instance_info.payload['memory'])
7560 cfefe007 Guido Trotter
        else:
7561 cfefe007 Guido Trotter
          # Assume instance not running
7562 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
7563 cfefe007 Guido Trotter
          # and we have no other way to check)
7564 cfefe007 Guido Trotter
          current_mem = 0
7565 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
7566 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
7567 cfefe007 Guido Trotter
        if miss_mem > 0:
7568 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
7569 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
7570 5c983ee5 Iustin Pop
                                     " missing on its primary node" % miss_mem,
7571 5c983ee5 Iustin Pop
                                     errors.ECODE_NORES)
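        # Worked example with made-up numbers: asking for 2048 MB while the
        # instance currently uses 512 MB and the node reports 1024 MB free
        # gives miss_mem = 2048 - 512 - 1024 = 512 > 0, so the change is
        # rejected here unless the force flag was given (in which case this
        # whole memory check is skipped above).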
7572 cfefe007 Guido Trotter
7573 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
7574 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
7575 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
7576 ea33068f Iustin Pop
            continue
7577 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
7578 070e998b Iustin Pop
          if msg:
7579 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
7580 070e998b Iustin Pop
                             (node, msg))
7581 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
7582 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
7583 070e998b Iustin Pop
                             " memory information" % node)
7584 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
7585 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
7586 647a5d80 Iustin Pop
                             " secondary node %s" % node)
7587 5bc84f33 Alexander Schreiber
7588 24991749 Iustin Pop
    # NIC processing
7589 cd098c41 Guido Trotter
    self.nic_pnew = {}
7590 cd098c41 Guido Trotter
    self.nic_pinst = {}
7591 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
7592 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
7593 24991749 Iustin Pop
        if not instance.nics:
7594 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
7595 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7596 24991749 Iustin Pop
        continue
7597 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
7598 24991749 Iustin Pop
        # an existing nic
7599 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
7600 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
7601 24991749 Iustin Pop
                                     " are 0 to %d" %
7602 5c983ee5 Iustin Pop
                                     (nic_op, len(instance.nics) - 1),
7603 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7604 cd098c41 Guido Trotter
        old_nic_params = instance.nics[nic_op].nicparams
7605 cd098c41 Guido Trotter
        old_nic_ip = instance.nics[nic_op].ip
7606 cd098c41 Guido Trotter
      else:
7607 cd098c41 Guido Trotter
        old_nic_params = {}
7608 cd098c41 Guido Trotter
        old_nic_ip = None
7609 cd098c41 Guido Trotter
7610 cd098c41 Guido Trotter
      update_params_dict = dict([(key, nic_dict[key])
7611 cd098c41 Guido Trotter
                                 for key in constants.NICS_PARAMETERS
7612 cd098c41 Guido Trotter
                                 if key in nic_dict])
7613 cd098c41 Guido Trotter
7614 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
7615 cd098c41 Guido Trotter
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
7616 cd098c41 Guido Trotter
7617 cd098c41 Guido Trotter
      new_nic_params, new_filled_nic_params = \
7618 cd098c41 Guido Trotter
          self._GetUpdatedParams(old_nic_params, update_params_dict,
7619 cd098c41 Guido Trotter
                                 cluster.nicparams[constants.PP_DEFAULT],
7620 cd098c41 Guido Trotter
                                 constants.NICS_PARAMETER_TYPES)
7621 cd098c41 Guido Trotter
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
7622 cd098c41 Guido Trotter
      self.nic_pinst[nic_op] = new_nic_params
7623 cd098c41 Guido Trotter
      self.nic_pnew[nic_op] = new_filled_nic_params
7624 cd098c41 Guido Trotter
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
7625 cd098c41 Guido Trotter
7626 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
7627 cd098c41 Guido Trotter
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
7628 4c4e4e1e Iustin Pop
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
7629 35c0c8da Iustin Pop
        if msg:
7630 35c0c8da Iustin Pop
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
7631 24991749 Iustin Pop
          if self.force:
7632 24991749 Iustin Pop
            self.warn.append(msg)
7633 24991749 Iustin Pop
          else:
7634 5c983ee5 Iustin Pop
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
7635 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_ROUTED:
7636 cd098c41 Guido Trotter
        if 'ip' in nic_dict:
7637 cd098c41 Guido Trotter
          nic_ip = nic_dict['ip']
7638 cd098c41 Guido Trotter
        else:
7639 cd098c41 Guido Trotter
          nic_ip = old_nic_ip
7640 cd098c41 Guido Trotter
        if nic_ip is None:
7641 cd098c41 Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic ip to None'
7642 5c983ee5 Iustin Pop
                                     ' on a routed nic', errors.ECODE_INVAL)
7643 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
7644 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
7645 5c44da6a Guido Trotter
        if nic_mac is None:
7646 5c983ee5 Iustin Pop
          raise errors.OpPrereqError('Cannot set the nic mac to None',
7647 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7648 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7649 5c44da6a Guido Trotter
          # otherwise generate the mac
7650 5c44da6a Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC()
7651 5c44da6a Guido Trotter
        else:
7652 5c44da6a Guido Trotter
          # or validate/reserve the current one
7653 5c44da6a Guido Trotter
          if self.cfg.IsMacInUse(nic_mac):
7654 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
7655 5c983ee5 Iustin Pop
                                       " in cluster" % nic_mac,
7656 5c983ee5 Iustin Pop
                                       errors.ECODE_NOTUNIQUE)
7657 24991749 Iustin Pop
7658 24991749 Iustin Pop
    # DISK processing
7659 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
7660 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
7661 5c983ee5 Iustin Pop
                                 " diskless instances",
7662 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7663 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
7664 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
7665 24991749 Iustin Pop
        if len(instance.disks) == 1:
7666 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
7667 5c983ee5 Iustin Pop
                                     " an instance",
7668 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7669 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
7670 24991749 Iustin Pop
        ins_l = ins_l[pnode]
7671 4c4e4e1e Iustin Pop
        msg = ins_l.fail_msg
7672 aca13712 Iustin Pop
        if msg:
7673 aca13712 Iustin Pop
          raise errors.OpPrereqError("Can't contact node %s: %s" %
7674 5c983ee5 Iustin Pop
                                     (pnode, msg), errors.ECODE_ENVIRON)
7675 aca13712 Iustin Pop
        if instance.name in ins_l.payload:
7676 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
7677 5c983ee5 Iustin Pop
                                     " disks.", errors.ECODE_STATE)
7678 24991749 Iustin Pop
7679 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
7680 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
7681 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
7682 5c983ee5 Iustin Pop
                                   " add more" % constants.MAX_DISKS,
7683 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
7684 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
7685 24991749 Iustin Pop
        # an existing disk
7686 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
7687 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
7688 24991749 Iustin Pop
                                     " are 0 to %d" %
7689 5c983ee5 Iustin Pop
                                     (disk_op, len(instance.disks) - 1),
7690 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7691 24991749 Iustin Pop
7692 a8083063 Iustin Pop
    return
7693 a8083063 Iustin Pop
7694 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7695 a8083063 Iustin Pop
    """Modifies an instance.
7696 a8083063 Iustin Pop

7697 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
7698 24991749 Iustin Pop

7699 a8083063 Iustin Pop
    """
7700 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
7701 cfefe007 Guido Trotter
    # feedback_fn there.
7702 cfefe007 Guido Trotter
    for warn in self.warn:
7703 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
7704 cfefe007 Guido Trotter
7705 a8083063 Iustin Pop
    result = []
7706 a8083063 Iustin Pop
    instance = self.instance
7707 cd098c41 Guido Trotter
    cluster = self.cluster
7708 24991749 Iustin Pop
    # disk changes
7709 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
7710 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
7711 24991749 Iustin Pop
        # remove the last disk
7712 24991749 Iustin Pop
        device = instance.disks.pop()
7713 24991749 Iustin Pop
        device_idx = len(instance.disks)
7714 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
7715 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
7716 4c4e4e1e Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
7717 e1bc0878 Iustin Pop
          if msg:
7718 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
7719 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
7720 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
7721 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
7722 24991749 Iustin Pop
        # add a new disk
7723 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
7724 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
7725 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
7726 24991749 Iustin Pop
        else:
7727 24991749 Iustin Pop
          file_driver = file_path = None
7728 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
7729 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
7730 24991749 Iustin Pop
                                         instance.disk_template,
7731 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
7732 24991749 Iustin Pop
                                         instance.secondary_nodes,
7733 24991749 Iustin Pop
                                         [disk_dict],
7734 24991749 Iustin Pop
                                         file_path,
7735 24991749 Iustin Pop
                                         file_driver,
7736 24991749 Iustin Pop
                                         disk_idx_base)[0]
7737 24991749 Iustin Pop
        instance.disks.append(new_disk)
7738 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
7739 24991749 Iustin Pop
7740 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
7741 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
7742 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
7743 24991749 Iustin Pop
        #HARDCODE
7744 428958aa Iustin Pop
        for node in instance.all_nodes:
7745 428958aa Iustin Pop
          f_create = node == instance.primary_node
7746 796cab27 Iustin Pop
          try:
7747 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
7748 428958aa Iustin Pop
                            f_create, info, f_create)
7749 1492cca7 Iustin Pop
          except errors.OpExecError, err:
7750 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
7751 428958aa Iustin Pop
                            " node %s: %s",
7752 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
7753 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
7754 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
7755 24991749 Iustin Pop
      else:
7756 24991749 Iustin Pop
        # change a given disk
7757 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
7758 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
7759 24991749 Iustin Pop
    # NIC changes
7760 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
7761 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
7762 24991749 Iustin Pop
        # remove the last nic
7763 24991749 Iustin Pop
        del instance.nics[-1]
7764 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
7765 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
7766 5c44da6a Guido Trotter
        # mac and bridge should be set by now
7767 5c44da6a Guido Trotter
        mac = nic_dict['mac']
7768 cd098c41 Guido Trotter
        ip = nic_dict.get('ip', None)
7769 cd098c41 Guido Trotter
        nicparams = self.nic_pinst[constants.DDM_ADD]
7770 cd098c41 Guido Trotter
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
7771 24991749 Iustin Pop
        instance.nics.append(new_nic)
7772 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
7773 cd098c41 Guido Trotter
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
7774 cd098c41 Guido Trotter
                       (new_nic.mac, new_nic.ip,
7775 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
7776 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
7777 cd098c41 Guido Trotter
                       )))
7778 24991749 Iustin Pop
      else:
7779 cd098c41 Guido Trotter
        for key in 'mac', 'ip':
7780 24991749 Iustin Pop
          if key in nic_dict:
7781 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
7782 cd098c41 Guido Trotter
        if nic_op in self.nic_pnew:
7783 cd098c41 Guido Trotter
          instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
7784 cd098c41 Guido Trotter
        for key, val in nic_dict.iteritems():
7785 cd098c41 Guido Trotter
          result.append(("nic.%s/%d" % (key, nic_op), val))
7786 24991749 Iustin Pop
7787 24991749 Iustin Pop
    # hvparams changes
7788 74409b12 Iustin Pop
    if self.op.hvparams:
7789 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
7790 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
7791 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
7792 24991749 Iustin Pop
7793 24991749 Iustin Pop
    # beparams changes
7794 338e51e8 Iustin Pop
    if self.op.beparams:
7795 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
7796 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
7797 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
7798 a8083063 Iustin Pop
7799 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
7800 a8083063 Iustin Pop
7801 a8083063 Iustin Pop
    return result
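    # Illustrative sketch only (values are hypothetical): the returned list
    # pairs each applied change with its new value, e.g.
    #   [("disk/1", "add:size=1024,mode=rw"),
    #    ("nic.mac/0", "aa:00:00:12:34:56"),
    #    ("be/memory", 512)]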
7802 a8083063 Iustin Pop
7803 a8083063 Iustin Pop
7804 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
7805 a8083063 Iustin Pop
  """Query the exports list
7806 a8083063 Iustin Pop

7807 a8083063 Iustin Pop
  """
7808 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
7809 21a15682 Guido Trotter
  REQ_BGL = False
7810 21a15682 Guido Trotter
7811 21a15682 Guido Trotter
  def ExpandNames(self):
7812 21a15682 Guido Trotter
    self.needed_locks = {}
7813 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
7814 21a15682 Guido Trotter
    if not self.op.nodes:
7815 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7816 21a15682 Guido Trotter
    else:
7817 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
7818 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
7819 a8083063 Iustin Pop
7820 a8083063 Iustin Pop
  def CheckPrereq(self):
7821 21a15682 Guido Trotter
    """Check prerequisites.
7822 a8083063 Iustin Pop

7823 a8083063 Iustin Pop
    """
7824 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
7825 a8083063 Iustin Pop
7826 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7827 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
7828 a8083063 Iustin Pop

7829 e4376078 Iustin Pop
    @rtype: dict
7830 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
7831 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
7832 e4376078 Iustin Pop
        that node.
7833 a8083063 Iustin Pop

7834 a8083063 Iustin Pop
    """
7835 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
7836 b04285f2 Guido Trotter
    result = {}
7837 b04285f2 Guido Trotter
    for node in rpcresult:
7838 4c4e4e1e Iustin Pop
      if rpcresult[node].fail_msg:
7839 b04285f2 Guido Trotter
        result[node] = False
7840 b04285f2 Guido Trotter
      else:
7841 1b7bfbb7 Iustin Pop
        result[node] = rpcresult[node].payload
7842 b04285f2 Guido Trotter
7843 b04285f2 Guido Trotter
    return result
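    # Illustrative sketch only (node and export names are hypothetical):
    # nodes that answered map to their export list, nodes that failed map
    # to False, e.g.
    #   {"node1.example.com": ["instance1.example.com"],
    #    "node2.example.com": False}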
7844 a8083063 Iustin Pop
7845 a8083063 Iustin Pop
7846 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
7847 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
7848 a8083063 Iustin Pop

7849 a8083063 Iustin Pop
  """
7850 a8083063 Iustin Pop
  HPATH = "instance-export"
7851 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7852 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
7853 6657590e Guido Trotter
  REQ_BGL = False
7854 6657590e Guido Trotter
7855 17c3f802 Guido Trotter
  def CheckArguments(self):
7856 17c3f802 Guido Trotter
    """Check the arguments.
7857 17c3f802 Guido Trotter

7858 17c3f802 Guido Trotter
    """
7859 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
7860 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
7861 17c3f802 Guido Trotter
7862 6657590e Guido Trotter
  def ExpandNames(self):
7863 6657590e Guido Trotter
    self._ExpandAndLockInstance()
7864 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
7865 6657590e Guido Trotter
    #
7866 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
7867 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
7868 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
7869 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
7870 6657590e Guido Trotter
    #    then one to remove, after
7871 5bbd3f7f Michael Hanselmann
    #  - removing the removal operation altogether
7872 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7873 6657590e Guido Trotter
7874 6657590e Guido Trotter
  def DeclareLocks(self, level):
7875 6657590e Guido Trotter
    """Last minute lock declaration."""
7876 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
7877 a8083063 Iustin Pop
7878 a8083063 Iustin Pop
  def BuildHooksEnv(self):
7879 a8083063 Iustin Pop
    """Build hooks env.
7880 a8083063 Iustin Pop

7881 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
7882 a8083063 Iustin Pop

7883 a8083063 Iustin Pop
    """
7884 a8083063 Iustin Pop
    env = {
7885 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
7886 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
7887 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
7888 a8083063 Iustin Pop
      }
7889 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7890 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
7891 a8083063 Iustin Pop
          self.op.target_node]
7892 a8083063 Iustin Pop
    return env, nl, nl
7893 a8083063 Iustin Pop
7894 a8083063 Iustin Pop
  def CheckPrereq(self):
7895 a8083063 Iustin Pop
    """Check prerequisites.
7896 a8083063 Iustin Pop

7897 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
7898 a8083063 Iustin Pop

7899 a8083063 Iustin Pop
    """
7900 6657590e Guido Trotter
    instance_name = self.op.instance_name
7901 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
7902 6657590e Guido Trotter
    assert self.instance is not None, \
7903 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
7904 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
7905 a8083063 Iustin Pop
7906 6657590e Guido Trotter
    self.dst_node = self.cfg.GetNodeInfo(
7907 6657590e Guido Trotter
      self.cfg.ExpandNodeName(self.op.target_node))
7908 a8083063 Iustin Pop
7909 268b8e42 Iustin Pop
    if self.dst_node is None:
7910 268b8e42 Iustin Pop
      # This is a wrong node name, not a non-locked node
7911 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node,
7912 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
7913 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
7914 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
7915 a8083063 Iustin Pop
7916 b6023d6c Manuel Franceschini
    # instance disk type verification
7917 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
7918 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
7919 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
7920 5c983ee5 Iustin Pop
                                   " file-based disks", errors.ECODE_INVAL)
7921 b6023d6c Manuel Franceschini
7922 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7923 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
7924 a8083063 Iustin Pop

7925 a8083063 Iustin Pop
    """
7926 a8083063 Iustin Pop
    instance = self.instance
7927 a8083063 Iustin Pop
    dst_node = self.dst_node
7928 a8083063 Iustin Pop
    src_node = instance.primary_node
7929 37972df0 Michael Hanselmann
7930 a8083063 Iustin Pop
    if self.op.shutdown:
7931 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
7932 37972df0 Michael Hanselmann
      feedback_fn("Shutting down instance %s" % instance.name)
7933 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(src_node, instance,
7934 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
7935 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance %s on"
7936 4c4e4e1e Iustin Pop
                   " node %s" % (instance.name, src_node))
7937 a8083063 Iustin Pop
7938 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
7939 a8083063 Iustin Pop
7940 a8083063 Iustin Pop
    snap_disks = []
7941 a8083063 Iustin Pop
7942 998c712c Iustin Pop
    # set the disks ID correctly since call_instance_start needs the
7943 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
7944 998c712c Iustin Pop
    for disk in instance.disks:
7945 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
7946 998c712c Iustin Pop
7947 3e53a60b Michael Hanselmann
    activate_disks = (not instance.admin_up)
7948 3e53a60b Michael Hanselmann
7949 3e53a60b Michael Hanselmann
    if activate_disks:
7950 3e53a60b Michael Hanselmann
      # Activate the instance disks if we're exporting a stopped instance
7951 3e53a60b Michael Hanselmann
      feedback_fn("Activating disks for %s" % instance.name)
7952 3e53a60b Michael Hanselmann
      _StartInstanceDisks(self, instance, None)
7953 3e53a60b Michael Hanselmann
7954 a8083063 Iustin Pop
    try:
7955 3e53a60b Michael Hanselmann
      # per-disk results
7956 3e53a60b Michael Hanselmann
      dresults = []
7957 3e53a60b Michael Hanselmann
      try:
7958 3e53a60b Michael Hanselmann
        for idx, disk in enumerate(instance.disks):
7959 3e53a60b Michael Hanselmann
          feedback_fn("Creating a snapshot of disk/%s on node %s" %
7960 3e53a60b Michael Hanselmann
                      (idx, src_node))
7961 3e53a60b Michael Hanselmann
7962 3e53a60b Michael Hanselmann
          # result.payload will be a snapshot of an LVM leaf of the disk we
7963 3e53a60b Michael Hanselmann
          # passed
7964 3e53a60b Michael Hanselmann
          result = self.rpc.call_blockdev_snapshot(src_node, disk)
7965 3e53a60b Michael Hanselmann
          msg = result.fail_msg
7966 3e53a60b Michael Hanselmann
          if msg:
7967 3e53a60b Michael Hanselmann
            self.LogWarning("Could not snapshot disk/%s on node %s: %s",
7968 3e53a60b Michael Hanselmann
                            idx, src_node, msg)
7969 3e53a60b Michael Hanselmann
            snap_disks.append(False)
7970 3e53a60b Michael Hanselmann
          else:
7971 3e53a60b Michael Hanselmann
            disk_id = (vgname, result.payload)
7972 3e53a60b Michael Hanselmann
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
7973 3e53a60b Michael Hanselmann
                                   logical_id=disk_id, physical_id=disk_id,
7974 3e53a60b Michael Hanselmann
                                   iv_name=disk.iv_name)
7975 3e53a60b Michael Hanselmann
            snap_disks.append(new_dev)
7976 37972df0 Michael Hanselmann
7977 3e53a60b Michael Hanselmann
      finally:
7978 3e53a60b Michael Hanselmann
        if self.op.shutdown and instance.admin_up:
7979 3e53a60b Michael Hanselmann
          feedback_fn("Starting instance %s" % instance.name)
7980 3e53a60b Michael Hanselmann
          result = self.rpc.call_instance_start(src_node, instance, None, None)
7981 3e53a60b Michael Hanselmann
          msg = result.fail_msg
7982 3e53a60b Michael Hanselmann
          if msg:
7983 3e53a60b Michael Hanselmann
            _ShutdownInstanceDisks(self, instance)
7984 3e53a60b Michael Hanselmann
            raise errors.OpExecError("Could not start instance: %s" % msg)
7985 3e53a60b Michael Hanselmann
7986 3e53a60b Michael Hanselmann
      # TODO: check for size
7987 3e53a60b Michael Hanselmann
7988 3e53a60b Michael Hanselmann
      cluster_name = self.cfg.GetClusterName()
7989 3e53a60b Michael Hanselmann
      for idx, dev in enumerate(snap_disks):
7990 3e53a60b Michael Hanselmann
        feedback_fn("Exporting snapshot %s from %s to %s" %
7991 3e53a60b Michael Hanselmann
                    (idx, src_node, dst_node.name))
7992 3e53a60b Michael Hanselmann
        if dev:
7993 3e53a60b Michael Hanselmann
          result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
7994 3e53a60b Michael Hanselmann
                                                 instance, cluster_name, idx)
7995 3e53a60b Michael Hanselmann
          msg = result.fail_msg
7996 3e53a60b Michael Hanselmann
          if msg:
7997 3e53a60b Michael Hanselmann
            self.LogWarning("Could not export disk/%s from node %s to"
7998 3e53a60b Michael Hanselmann
                            " node %s: %s", idx, src_node, dst_node.name, msg)
7999 3e53a60b Michael Hanselmann
            dresults.append(False)
8000 3e53a60b Michael Hanselmann
          else:
8001 3e53a60b Michael Hanselmann
            dresults.append(True)
8002 3e53a60b Michael Hanselmann
          msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
8003 3e53a60b Michael Hanselmann
          if msg:
8004 3e53a60b Michael Hanselmann
            self.LogWarning("Could not remove snapshot for disk/%d from node"
8005 3e53a60b Michael Hanselmann
                            " %s: %s", idx, src_node, msg)
8006 19d7f90a Guido Trotter
        else:
8007 084f05a5 Iustin Pop
          dresults.append(False)
8008 a8083063 Iustin Pop
8009 3e53a60b Michael Hanselmann
      feedback_fn("Finalizing export on %s" % dst_node.name)
8010 3e53a60b Michael Hanselmann
      result = self.rpc.call_finalize_export(dst_node.name, instance,
8011 3e53a60b Michael Hanselmann
                                             snap_disks)
8012 3e53a60b Michael Hanselmann
      fin_resu = True
8013 3e53a60b Michael Hanselmann
      msg = result.fail_msg
8014 3e53a60b Michael Hanselmann
      if msg:
8015 3e53a60b Michael Hanselmann
        self.LogWarning("Could not finalize export for instance %s"
8016 3e53a60b Michael Hanselmann
                        " on node %s: %s", instance.name, dst_node.name, msg)
8017 3e53a60b Michael Hanselmann
        fin_resu = False
8018 3e53a60b Michael Hanselmann
8019 3e53a60b Michael Hanselmann
    finally:
8020 3e53a60b Michael Hanselmann
      if activate_disks:
8021 3e53a60b Michael Hanselmann
        feedback_fn("Deactivating disks for %s" % instance.name)
8022 3e53a60b Michael Hanselmann
        _ShutdownInstanceDisks(self, instance)
8023 a8083063 Iustin Pop
8024 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
8025 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
8026 a8083063 Iustin Pop
8027 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
8028 a8083063 Iustin Pop
    # if we proceed, the backup would be removed because OpQueryExports
8029 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
8030 35fbcd11 Iustin Pop
    iname = instance.name
8031 a8083063 Iustin Pop
    if nodelist:
8032 37972df0 Michael Hanselmann
      feedback_fn("Removing old exports for instance %s" % iname)
8033 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
8034 a8083063 Iustin Pop
      for node in exportlist:
8035 4c4e4e1e Iustin Pop
        if exportlist[node].fail_msg:
8036 781de953 Iustin Pop
          continue
8037 35fbcd11 Iustin Pop
        if iname in exportlist[node].payload:
8038 4c4e4e1e Iustin Pop
          msg = self.rpc.call_export_remove(node, iname).fail_msg
8039 35fbcd11 Iustin Pop
          if msg:
8040 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
8041 35fbcd11 Iustin Pop
                            " on node %s: %s", iname, node, msg)
8042 084f05a5 Iustin Pop
    return fin_resu, dresults
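    # Illustrative sketch only: for a two-disk instance whose first snapshot
    # was exported successfully and whose second was not, this would return
    #   (True, [True, False])
    # i.e. the finalize step succeeded and dresults holds one boolean per
    # disk.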
8043 5c947f38 Iustin Pop
8044 5c947f38 Iustin Pop
8045 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
8046 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
8047 9ac99fda Guido Trotter

8048 9ac99fda Guido Trotter
  """
8049 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
8050 3656b3af Guido Trotter
  REQ_BGL = False
8051 3656b3af Guido Trotter
8052 3656b3af Guido Trotter
  def ExpandNames(self):
8053 3656b3af Guido Trotter
    self.needed_locks = {}
8054 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
8055 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
8056 3656b3af Guido Trotter
    # we can remove exports even for a removed instance)
8057 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8058 9ac99fda Guido Trotter
8059 9ac99fda Guido Trotter
  def CheckPrereq(self):
8060 9ac99fda Guido Trotter
    """Check prerequisites.
8061 9ac99fda Guido Trotter
    """
8062 9ac99fda Guido Trotter
    pass
8063 9ac99fda Guido Trotter
8064 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
8065 9ac99fda Guido Trotter
    """Remove any export.
8066 9ac99fda Guido Trotter

8067 9ac99fda Guido Trotter
    """
8068 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
8069 9ac99fda Guido Trotter
    # If the instance was not found, we'll try the name that was passed in.
8070 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
8071 9ac99fda Guido Trotter
    fqdn_warn = False
8072 9ac99fda Guido Trotter
    if not instance_name:
8073 9ac99fda Guido Trotter
      fqdn_warn = True
8074 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
8075 9ac99fda Guido Trotter
8076 1b7bfbb7 Iustin Pop
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
8077 1b7bfbb7 Iustin Pop
    exportlist = self.rpc.call_export_list(locked_nodes)
8078 9ac99fda Guido Trotter
    found = False
8079 9ac99fda Guido Trotter
    for node in exportlist:
8080 4c4e4e1e Iustin Pop
      msg = exportlist[node].fail_msg
8081 1b7bfbb7 Iustin Pop
      if msg:
8082 1b7bfbb7 Iustin Pop
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
8083 781de953 Iustin Pop
        continue
8084 1b7bfbb7 Iustin Pop
      if instance_name in exportlist[node].payload:
8085 9ac99fda Guido Trotter
        found = True
8086 781de953 Iustin Pop
        result = self.rpc.call_export_remove(node, instance_name)
8087 4c4e4e1e Iustin Pop
        msg = result.fail_msg
8088 35fbcd11 Iustin Pop
        if msg:
8089 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
8090 35fbcd11 Iustin Pop
                        " on node %s: %s", instance_name, node, msg)
8091 9ac99fda Guido Trotter
8092 9ac99fda Guido Trotter
    if fqdn_warn and not found:
8093 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
8094 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
8095 9ac99fda Guido Trotter
                  " Domain Name.")
8096 9ac99fda Guido Trotter
8097 9ac99fda Guido Trotter
8098 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
8099 5c947f38 Iustin Pop
  """Generic tags LU.
8100 5c947f38 Iustin Pop

8101 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
8102 5c947f38 Iustin Pop

8103 5c947f38 Iustin Pop
  """
8104 5c947f38 Iustin Pop
8105 8646adce Guido Trotter
  def ExpandNames(self):
8106 8646adce Guido Trotter
    self.needed_locks = {}
8107 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
8108 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
8109 5c947f38 Iustin Pop
      if name is None:
8110 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
8111 5c983ee5 Iustin Pop
                                   (self.op.name,), errors.ECODE_NOENT)
8112 5c947f38 Iustin Pop
      self.op.name = name
8113 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = name
8114 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
8115 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
8116 5c947f38 Iustin Pop
      if name is None:
8117 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
8118 5c983ee5 Iustin Pop
                                   (self.op.name,), errors.ECODE_NOENT)
8119 5c947f38 Iustin Pop
      self.op.name = name
8120 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = name
8121 8646adce Guido Trotter
8122 8646adce Guido Trotter
  def CheckPrereq(self):
8123 8646adce Guido Trotter
    """Check prerequisites.
8124 8646adce Guido Trotter

8125 8646adce Guido Trotter
    """
8126 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
8127 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
8128 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
8129 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
8130 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
8131 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
8132 5c947f38 Iustin Pop
    else:
8133 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
8134 5c983ee5 Iustin Pop
                                 str(self.op.kind), errors.ECODE_INVAL)
8135 5c947f38 Iustin Pop
8136 5c947f38 Iustin Pop
8137 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
8138 5c947f38 Iustin Pop
  """Returns the tags of a given object.
8139 5c947f38 Iustin Pop

8140 5c947f38 Iustin Pop
  """
8141 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
8142 8646adce Guido Trotter
  REQ_BGL = False
8143 5c947f38 Iustin Pop
8144 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8145 5c947f38 Iustin Pop
    """Returns the tag list.
8146 5c947f38 Iustin Pop

8147 5c947f38 Iustin Pop
    """
8148 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
8149 5c947f38 Iustin Pop
8150 5c947f38 Iustin Pop
8151 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
8152 73415719 Iustin Pop
  """Searches the tags for a given pattern.
8153 73415719 Iustin Pop

8154 73415719 Iustin Pop
  """
8155 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
8156 8646adce Guido Trotter
  REQ_BGL = False
8157 8646adce Guido Trotter
8158 8646adce Guido Trotter
  def ExpandNames(self):
8159 8646adce Guido Trotter
    self.needed_locks = {}
8160 73415719 Iustin Pop
8161 73415719 Iustin Pop
  def CheckPrereq(self):
8162 73415719 Iustin Pop
    """Check prerequisites.
8163 73415719 Iustin Pop

8164 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
8165 73415719 Iustin Pop

8166 73415719 Iustin Pop
    """
8167 73415719 Iustin Pop
    try:
8168 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
8169 73415719 Iustin Pop
    except re.error, err:
8170 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
8171 5c983ee5 Iustin Pop
                                 (self.op.pattern, err), errors.ECODE_INVAL)
8172 73415719 Iustin Pop
8173 73415719 Iustin Pop
  def Exec(self, feedback_fn):
8174 73415719 Iustin Pop
    """Returns the tag list.
8175 73415719 Iustin Pop

8176 73415719 Iustin Pop
    """
8177 73415719 Iustin Pop
    cfg = self.cfg
8178 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
8179 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
8180 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
8181 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
8182 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
8183 73415719 Iustin Pop
    results = []
8184 73415719 Iustin Pop
    for path, target in tgts:
8185 73415719 Iustin Pop
      for tag in target.GetTags():
8186 73415719 Iustin Pop
        if self.re.search(tag):
8187 73415719 Iustin Pop
          results.append((path, tag))
8188 73415719 Iustin Pop
    return results
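  # Illustrative note, not part of the original module: Exec() above returns a
  # list of (path, tag) tuples, one entry per matching tag, with paths taken
  # from the tgts list ("/cluster", "/nodes/<name>", "/instances/<name>").
  # With hypothetical names the result could look like:
  #
  #   [("/cluster", "production"),
  #    ("/nodes/node1.example.com", "rack:a3"),
  #    ("/instances/web1.example.com", "role:frontend")]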
8189 73415719 Iustin Pop
8190 73415719 Iustin Pop
8191 f27302fa Iustin Pop
class LUAddTags(TagsLU):
8192 5c947f38 Iustin Pop
  """Sets a tag on a given object.
8193 5c947f38 Iustin Pop

8194 5c947f38 Iustin Pop
  """
8195 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
8196 8646adce Guido Trotter
  REQ_BGL = False
8197 5c947f38 Iustin Pop
8198 5c947f38 Iustin Pop
  def CheckPrereq(self):
8199 5c947f38 Iustin Pop
    """Check prerequisites.
8200 5c947f38 Iustin Pop

8201 5c947f38 Iustin Pop
    This checks the type and length of each of the given tags.
8202 5c947f38 Iustin Pop

8203 5c947f38 Iustin Pop
    """
8204 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
8205 f27302fa Iustin Pop
    for tag in self.op.tags:
8206 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
8207 5c947f38 Iustin Pop
8208 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8209 5c947f38 Iustin Pop
    """Sets the tag.
8210 5c947f38 Iustin Pop

8211 5c947f38 Iustin Pop
    """
8212 5c947f38 Iustin Pop
    try:
8213 f27302fa Iustin Pop
      for tag in self.op.tags:
8214 f27302fa Iustin Pop
        self.target.AddTag(tag)
8215 5c947f38 Iustin Pop
    except errors.TagError, err:
8216 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
8217 159d4ec6 Iustin Pop
    self.cfg.Update(self.target, feedback_fn)
8218 5c947f38 Iustin Pop
8219 5c947f38 Iustin Pop
8220 f27302fa Iustin Pop
class LUDelTags(TagsLU):
8221 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
8222 5c947f38 Iustin Pop

8223 5c947f38 Iustin Pop
  """
8224 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
8225 8646adce Guido Trotter
  REQ_BGL = False
8226 5c947f38 Iustin Pop
8227 5c947f38 Iustin Pop
  def CheckPrereq(self):
8228 5c947f38 Iustin Pop
    """Check prerequisites.
8229 5c947f38 Iustin Pop

8230 5c947f38 Iustin Pop
    This checks that the object holds all of the given tags.
8231 5c947f38 Iustin Pop

8232 5c947f38 Iustin Pop
    """
8233 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
8234 f27302fa Iustin Pop
    for tag in self.op.tags:
8235 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
8236 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
8237 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
8238 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
8239 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
8240 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
8241 f27302fa Iustin Pop
      diff_names.sort()
8242 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
8243 5c983ee5 Iustin Pop
                                 (",".join(diff_names)), errors.ECODE_NOENT)
8244 5c947f38 Iustin Pop
8245 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8246 5c947f38 Iustin Pop
    """Remove the tag from the object.
8247 5c947f38 Iustin Pop

8248 5c947f38 Iustin Pop
    """
8249 f27302fa Iustin Pop
    for tag in self.op.tags:
8250 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
8251 159d4ec6 Iustin Pop
    self.cfg.Update(self.target, feedback_fn)
8252 06009e27 Iustin Pop
8253 0eed6e61 Guido Trotter
8254 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
8255 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
8256 06009e27 Iustin Pop

8257 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
8258 06009e27 Iustin Pop
  time.
8259 06009e27 Iustin Pop

8260 06009e27 Iustin Pop
  """
8261 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
8262 fbe9022f Guido Trotter
  REQ_BGL = False
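  # Illustrative note, not part of the original module: the three opcode
  # fields listed in _OP_REQP above are consumed by Exec() below, e.g.
  # (hypothetical values):
  #
  #   duration = 10.0                   # seconds to sleep
  #   on_master = True                  # sleep on the master (utils.TestDelay)
  #   on_nodes = ["node1.example.com"]  # sleep there too (rpc.call_test_delay)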
8263 06009e27 Iustin Pop
8264 fbe9022f Guido Trotter
  def ExpandNames(self):
8265 fbe9022f Guido Trotter
    """Expand names and set required locks.
8266 06009e27 Iustin Pop

8267 fbe9022f Guido Trotter
    This expands the node list, if any.
8268 06009e27 Iustin Pop

8269 06009e27 Iustin Pop
    """
8270 fbe9022f Guido Trotter
    self.needed_locks = {}
8271 06009e27 Iustin Pop
    if self.op.on_nodes:
8272 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
8273 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
8274 fbe9022f Guido Trotter
      # more information.
8275 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
8276 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
8277 fbe9022f Guido Trotter
8278 fbe9022f Guido Trotter
  def CheckPrereq(self):
8279 fbe9022f Guido Trotter
    """Check prerequisites.
8280 fbe9022f Guido Trotter

8281 fbe9022f Guido Trotter
    """
8282 06009e27 Iustin Pop
8283 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
8284 06009e27 Iustin Pop
    """Do the actual sleep.
8285 06009e27 Iustin Pop

8286 06009e27 Iustin Pop
    """
8287 06009e27 Iustin Pop
    if self.op.on_master:
8288 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
8289 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
8290 06009e27 Iustin Pop
    if self.op.on_nodes:
8291 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
8292 06009e27 Iustin Pop
      for node, node_result in result.items():
8293 4c4e4e1e Iustin Pop
        node_result.Raise("Failure during rpc call to node %s" % node)
8294 d61df03e Iustin Pop
8295 d61df03e Iustin Pop
8296 d1c2dd75 Iustin Pop
class IAllocator(object):
8297 d1c2dd75 Iustin Pop
  """IAllocator framework.
8298 d61df03e Iustin Pop

8299 d1c2dd75 Iustin Pop
  An IAllocator instance has four sets of attributes:
8300 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
8301 d1c2dd75 Iustin Pop
    - input data (all members of the mode's _ALLO_KEYS/_RELO_KEYS are required)
8302 d1c2dd75 Iustin Pop
    - four buffer attributes (in_text/in_data, out_text/out_data) holding the
8303 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
8304 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
8305 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
8306 d1c2dd75 Iustin Pop
      easy usage
8307 d61df03e Iustin Pop

8308 d61df03e Iustin Pop
  """
8309 29859cb7 Iustin Pop
  _ALLO_KEYS = [
8310 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
8311 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
8312 d1c2dd75 Iustin Pop
    ]
8313 29859cb7 Iustin Pop
  _RELO_KEYS = [
8314 29859cb7 Iustin Pop
    "relocate_from",
8315 29859cb7 Iustin Pop
    ]
8316 d1c2dd75 Iustin Pop
8317 923ddac0 Michael Hanselmann
  def __init__(self, cfg, rpc, mode, name, **kwargs):
8318 923ddac0 Michael Hanselmann
    self.cfg = cfg
8319 923ddac0 Michael Hanselmann
    self.rpc = rpc
8320 d1c2dd75 Iustin Pop
    # init buffer variables
8321 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
8322 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
8323 29859cb7 Iustin Pop
    self.mode = mode
8324 29859cb7 Iustin Pop
    self.name = name
8325 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
8326 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
8327 a0add446 Iustin Pop
    self.hypervisor = None
8328 29859cb7 Iustin Pop
    self.relocate_from = None
8329 27579978 Iustin Pop
    # computed fields
8330 27579978 Iustin Pop
    self.required_nodes = None
8331 d1c2dd75 Iustin Pop
    # init result fields
8332 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
8333 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
8334 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
8335 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
8336 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
8337 29859cb7 Iustin Pop
    else:
8338 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
8339 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
8340 d1c2dd75 Iustin Pop
    for key in kwargs:
8341 29859cb7 Iustin Pop
      if key not in keyset:
8342 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
8343 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
8344 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
8345 29859cb7 Iustin Pop
    for key in keyset:
8346 d1c2dd75 Iustin Pop
      if key not in kwargs:
8347 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
8348 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
8349 d1c2dd75 Iustin Pop
    self._BuildInputData()
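  # Illustrative sketch, not part of the original module: how a logical unit
  # holding self.cfg and self.rpc is expected to drive this class, mirroring
  # LUTestAllocator.Exec below.  Instance, node and allocator names are
  # hypothetical:
  #
  #   ial = IAllocator(self.cfg, self.rpc,
  #                    mode=constants.IALLOCATOR_MODE_RELOC,
  #                    name="web1.example.com",
  #                    relocate_from=["node2.example.com"])
  #   ial.Run("my-allocator")  # runs the external script and validates output
  #   if not ial.success:
  #     raise errors.OpExecError("Can't relocate: iallocator said: %s" %
  #                              ial.info)
  #   new_nodes = ial.nodes    # node names chosen by the script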
8350 d1c2dd75 Iustin Pop
8351 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
8352 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
8353 d1c2dd75 Iustin Pop

8354 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
8355 d1c2dd75 Iustin Pop

8356 d1c2dd75 Iustin Pop
    """
8357 923ddac0 Michael Hanselmann
    cfg = self.cfg
8358 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
8359 d1c2dd75 Iustin Pop
    # cluster data
8360 d1c2dd75 Iustin Pop
    data = {
8361 77031881 Iustin Pop
      "version": constants.IALLOCATOR_VERSION,
8362 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
8363 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
8364 1325da74 Iustin Pop
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
8365 d1c2dd75 Iustin Pop
      # we don't have job IDs
8366 d61df03e Iustin Pop
      }
8367 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
8368 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
8369 6286519f Iustin Pop
8370 d1c2dd75 Iustin Pop
    # node data
8371 d1c2dd75 Iustin Pop
    node_results = {}
8372 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
8373 8cc7e742 Guido Trotter
8374 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
8375 a0add446 Iustin Pop
      hypervisor_name = self.hypervisor
8376 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
8377 a0add446 Iustin Pop
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
8378 8cc7e742 Guido Trotter
8379 923ddac0 Michael Hanselmann
    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
8380 923ddac0 Michael Hanselmann
                                        hypervisor_name)
8381 923ddac0 Michael Hanselmann
    node_iinfo = \
8382 923ddac0 Michael Hanselmann
      self.rpc.call_all_instances_info(node_list,
8383 923ddac0 Michael Hanselmann
                                       cluster_info.enabled_hypervisors)
8384 1325da74 Iustin Pop
    for nname, nresult in node_data.items():
8385 1325da74 Iustin Pop
      # first fill in static (config-based) values
8386 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
8387 d1c2dd75 Iustin Pop
      pnr = {
8388 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
8389 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
8390 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
8391 fc0fe88c Iustin Pop
        "offline": ninfo.offline,
8392 0b2454b9 Iustin Pop
        "drained": ninfo.drained,
8393 1325da74 Iustin Pop
        "master_candidate": ninfo.master_candidate,
8394 d1c2dd75 Iustin Pop
        }
8395 1325da74 Iustin Pop
8396 0d853843 Iustin Pop
      if not (ninfo.offline or ninfo.drained):
8397 4c4e4e1e Iustin Pop
        nresult.Raise("Can't get data for node %s" % nname)
8398 4c4e4e1e Iustin Pop
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
8399 4c4e4e1e Iustin Pop
                                nname)
8400 070e998b Iustin Pop
        remote_info = nresult.payload
8401 b142ef15 Iustin Pop
8402 1325da74 Iustin Pop
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
8403 1325da74 Iustin Pop
                     'vg_size', 'vg_free', 'cpu_total']:
8404 1325da74 Iustin Pop
          if attr not in remote_info:
8405 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' didn't return attribute"
8406 1325da74 Iustin Pop
                                     " '%s'" % (nname, attr))
8407 070e998b Iustin Pop
          if not isinstance(remote_info[attr], int):
8408 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' returned invalid value"
8409 070e998b Iustin Pop
                                     " for '%s': %s" %
8410 070e998b Iustin Pop
                                     (nname, attr, remote_info[attr]))
8411 1325da74 Iustin Pop
        # compute memory used by primary instances
8412 1325da74 Iustin Pop
        i_p_mem = i_p_up_mem = 0
8413 1325da74 Iustin Pop
        for iinfo, beinfo in i_list:
8414 1325da74 Iustin Pop
          if iinfo.primary_node == nname:
8415 1325da74 Iustin Pop
            i_p_mem += beinfo[constants.BE_MEMORY]
8416 2fa74ef4 Iustin Pop
            if iinfo.name not in node_iinfo[nname].payload:
8417 1325da74 Iustin Pop
              i_used_mem = 0
8418 1325da74 Iustin Pop
            else:
8419 2fa74ef4 Iustin Pop
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
8420 1325da74 Iustin Pop
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
8421 1325da74 Iustin Pop
            remote_info['memory_free'] -= max(0, i_mem_diff)
8422 1325da74 Iustin Pop
8423 1325da74 Iustin Pop
            if iinfo.admin_up:
8424 1325da74 Iustin Pop
              i_p_up_mem += beinfo[constants.BE_MEMORY]
8425 1325da74 Iustin Pop
8426 1325da74 Iustin Pop
        # compute memory used by instances
8427 1325da74 Iustin Pop
        pnr_dyn = {
8428 1325da74 Iustin Pop
          "total_memory": remote_info['memory_total'],
8429 1325da74 Iustin Pop
          "reserved_memory": remote_info['memory_dom0'],
8430 1325da74 Iustin Pop
          "free_memory": remote_info['memory_free'],
8431 1325da74 Iustin Pop
          "total_disk": remote_info['vg_size'],
8432 1325da74 Iustin Pop
          "free_disk": remote_info['vg_free'],
8433 1325da74 Iustin Pop
          "total_cpus": remote_info['cpu_total'],
8434 1325da74 Iustin Pop
          "i_pri_memory": i_p_mem,
8435 1325da74 Iustin Pop
          "i_pri_up_memory": i_p_up_mem,
8436 1325da74 Iustin Pop
          }
8437 1325da74 Iustin Pop
        pnr.update(pnr_dyn)
8438 1325da74 Iustin Pop
8439 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
8440 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
8441 d1c2dd75 Iustin Pop
8442 d1c2dd75 Iustin Pop
    # instance data
8443 d1c2dd75 Iustin Pop
    instance_data = {}
8444 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
8445 a9fe7e8f Guido Trotter
      nic_data = []
8446 a9fe7e8f Guido Trotter
      for nic in iinfo.nics:
8447 a9fe7e8f Guido Trotter
        filled_params = objects.FillDict(
8448 a9fe7e8f Guido Trotter
            cluster_info.nicparams[constants.PP_DEFAULT],
8449 a9fe7e8f Guido Trotter
            nic.nicparams)
8450 a9fe7e8f Guido Trotter
        nic_dict = {"mac": nic.mac,
8451 a9fe7e8f Guido Trotter
                    "ip": nic.ip,
8452 a9fe7e8f Guido Trotter
                    "mode": filled_params[constants.NIC_MODE],
8453 a9fe7e8f Guido Trotter
                    "link": filled_params[constants.NIC_LINK],
8454 a9fe7e8f Guido Trotter
                   }
8455 a9fe7e8f Guido Trotter
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
8456 a9fe7e8f Guido Trotter
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
8457 a9fe7e8f Guido Trotter
        nic_data.append(nic_dict)
8458 d1c2dd75 Iustin Pop
      pir = {
8459 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
8460 1325da74 Iustin Pop
        "admin_up": iinfo.admin_up,
8461 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
8462 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
8463 d1c2dd75 Iustin Pop
        "os": iinfo.os,
8464 1325da74 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
8465 d1c2dd75 Iustin Pop
        "nics": nic_data,
8466 1325da74 Iustin Pop
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
8467 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
8468 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
8469 d1c2dd75 Iustin Pop
        }
8470 88ae4f85 Iustin Pop
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
8471 88ae4f85 Iustin Pop
                                                 pir["disks"])
8472 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
8473 d61df03e Iustin Pop
8474 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
8475 d61df03e Iustin Pop
8476 d1c2dd75 Iustin Pop
    self.in_data = data
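  # Illustrative note, not part of the original module: after this method runs,
  # self.in_data has the following top-level shape (string values are
  # hypothetical, nested dicts left empty here; see the pnr/pir construction
  # above):
  #
  #   {
  #     "version": constants.IALLOCATOR_VERSION,
  #     "cluster_name": "cluster.example.com",
  #     "cluster_tags": [],
  #     "enabled_hypervisors": ["xen-pvm"],
  #     "nodes": {},      # one pnr dict per node, as built above
  #     "instances": {},  # one pir dict per instance, as built above
  #   }
  #
  # The "request" key is added afterwards by _AddNewInstance or
  # _AddRelocateInstance.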
8477 d61df03e Iustin Pop
8478 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
8479 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
8480 d61df03e Iustin Pop

8481 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
8482 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
8483 d61df03e Iustin Pop

8484 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
8485 d1c2dd75 Iustin Pop
    done.
8486 d61df03e Iustin Pop

8487 d1c2dd75 Iustin Pop
    """
8488 d1c2dd75 Iustin Pop
    data = self.in_data
8489 d1c2dd75 Iustin Pop
8490 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
8491 d1c2dd75 Iustin Pop
8492 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
8493 27579978 Iustin Pop
      self.required_nodes = 2
8494 27579978 Iustin Pop
    else:
8495 27579978 Iustin Pop
      self.required_nodes = 1
8496 d1c2dd75 Iustin Pop
    request = {
8497 d1c2dd75 Iustin Pop
      "type": "allocate",
8498 d1c2dd75 Iustin Pop
      "name": self.name,
8499 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
8500 d1c2dd75 Iustin Pop
      "tags": self.tags,
8501 d1c2dd75 Iustin Pop
      "os": self.os,
8502 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
8503 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
8504 d1c2dd75 Iustin Pop
      "disks": self.disks,
8505 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
8506 d1c2dd75 Iustin Pop
      "nics": self.nics,
8507 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
8508 d1c2dd75 Iustin Pop
      }
8509 d1c2dd75 Iustin Pop
    data["request"] = request
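  # Illustrative note, not part of the original module: for an allocation the
  # request built above could look like this (all values hypothetical, the
  # disk_space_total value coming from _ComputeDiskSize):
  #
  #   {
  #     "type": "allocate",
  #     "name": "web1.example.com",
  #     "disk_template": "drbd",
  #     "tags": [],
  #     "os": "debian-etch",
  #     "vcpus": 1,
  #     "memory": 512,
  #     "disks": [{"size": 10240, "mode": "w"}],
  #     "disk_space_total": 10368,
  #     "nics": [{"mac": "aa:00:00:35:6e:01", "ip": None,
  #               "bridge": "xen-br0"}],
  #     "required_nodes": 2,
  #   }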
8510 298fe380 Iustin Pop
8511 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
8512 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
8513 298fe380 Iustin Pop

8514 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
8515 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
8516 d61df03e Iustin Pop

8517 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
8518 d1c2dd75 Iustin Pop
    done.
8519 d61df03e Iustin Pop

8520 d1c2dd75 Iustin Pop
    """
8521 923ddac0 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(self.name)
8522 27579978 Iustin Pop
    if instance is None:
8523 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
8524 27579978 Iustin Pop
                                   " IAllocator" % self.name)
8525 27579978 Iustin Pop
8526 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
8527 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
8528 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
8529 27579978 Iustin Pop
8530 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
8531 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
8532 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
8533 2a139bb0 Iustin Pop
8534 27579978 Iustin Pop
    self.required_nodes = 1
8535 dafc7302 Guido Trotter
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
8536 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
8537 27579978 Iustin Pop
8538 d1c2dd75 Iustin Pop
    request = {
8539 2a139bb0 Iustin Pop
      "type": "relocate",
8540 d1c2dd75 Iustin Pop
      "name": self.name,
8541 27579978 Iustin Pop
      "disk_space_total": disk_space,
8542 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
8543 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
8544 d1c2dd75 Iustin Pop
      }
8545 27579978 Iustin Pop
    self.in_data["request"] = request
8546 d61df03e Iustin Pop
8547 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
8548 d1c2dd75 Iustin Pop
    """Build input data structures.
8549 d61df03e Iustin Pop

8550 d1c2dd75 Iustin Pop
    """
8551 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
8552 d61df03e Iustin Pop
8553 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
8554 d1c2dd75 Iustin Pop
      self._AddNewInstance()
8555 d1c2dd75 Iustin Pop
    else:
8556 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
8557 d61df03e Iustin Pop
8558 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
8559 d61df03e Iustin Pop
8560 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
8561 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
8562 298fe380 Iustin Pop

8563 d1c2dd75 Iustin Pop
    """
8564 72737a7f Iustin Pop
    if call_fn is None:
8565 923ddac0 Michael Hanselmann
      call_fn = self.rpc.call_iallocator_runner
8566 298fe380 Iustin Pop
8567 923ddac0 Michael Hanselmann
    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
8568 4c4e4e1e Iustin Pop
    result.Raise("Failure while running the iallocator script")
8569 8d528b7c Iustin Pop
8570 87f5c298 Iustin Pop
    self.out_text = result.payload
8571 d1c2dd75 Iustin Pop
    if validate:
8572 d1c2dd75 Iustin Pop
      self._ValidateResult()
8573 298fe380 Iustin Pop
8574 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
8575 d1c2dd75 Iustin Pop
    """Process the allocator results.
8576 538475ca Iustin Pop

8577 d1c2dd75 Iustin Pop
    This will process the allocator output and, if successful, save the result in
8578 d1c2dd75 Iustin Pop
    self.out_data and the other result attributes (success, info, nodes).
8579 538475ca Iustin Pop

8580 d1c2dd75 Iustin Pop
    """
8581 d1c2dd75 Iustin Pop
    try:
8582 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
8583 d1c2dd75 Iustin Pop
    except Exception, err:
8584 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
8585 d1c2dd75 Iustin Pop
8586 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
8587 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
8588 538475ca Iustin Pop
8589 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
8590 d1c2dd75 Iustin Pop
      if key not in rdict:
8591 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
8592 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
8593 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
8594 538475ca Iustin Pop
8595 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
8596 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
8597 d1c2dd75 Iustin Pop
                               " is not a list")
8598 d1c2dd75 Iustin Pop
    self.out_data = rdict
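  # Illustrative note, not part of the original module: the JSON reply held in
  # self.out_text must contain at least the three keys checked above, e.g.
  # (hypothetical node names):
  #
  #   {"success": true,
  #    "info": "allocation successful",
  #    "nodes": ["node1.example.com", "node3.example.com"]}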
8599 538475ca Iustin Pop
8600 538475ca Iustin Pop
8601 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
8602 d61df03e Iustin Pop
  """Run allocator tests.
8603 d61df03e Iustin Pop

8604 d61df03e Iustin Pop
  This LU runs the allocator tests.
8605 d61df03e Iustin Pop

8606 d61df03e Iustin Pop
  """
8607 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
8608 d61df03e Iustin Pop
8609 d61df03e Iustin Pop
  def CheckPrereq(self):
8610 d61df03e Iustin Pop
    """Check prerequisites.
8611 d61df03e Iustin Pop

8612 d61df03e Iustin Pop
    This checks the opcode parameters depending on the direction and mode.
8613 d61df03e Iustin Pop

8614 d61df03e Iustin Pop
    """
8615 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
8616 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
8617 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
8618 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
8619 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
8620 5c983ee5 Iustin Pop
                                     attr, errors.ECODE_INVAL)
8621 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
8622 d61df03e Iustin Pop
      if iname is not None:
8623 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
8624 5c983ee5 Iustin Pop
                                   iname, errors.ECODE_EXISTS)
8625 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
8626 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'",
8627 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
8628 d61df03e Iustin Pop
      for row in self.op.nics:
8629 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
8630 d61df03e Iustin Pop
            "mac" not in row or
8631 d61df03e Iustin Pop
            "ip" not in row or
8632 d61df03e Iustin Pop
            "bridge" not in row):
8633 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the 'nics'"
8634 5c983ee5 Iustin Pop
                                     " parameter", errors.ECODE_INVAL)
8635 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
8636 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'",
8637 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
8638 d61df03e Iustin Pop
      for row in self.op.disks:
8639 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
8640 d61df03e Iustin Pop
            "size" not in row or
8641 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
8642 d61df03e Iustin Pop
            "mode" not in row or
8643 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
8644 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
8645 5c983ee5 Iustin Pop
                                     " parameter", errors.ECODE_INVAL)
8646 8901997e Iustin Pop
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
8647 8cc7e742 Guido Trotter
        self.op.hypervisor = self.cfg.GetHypervisorType()
8648 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
8649 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
8650 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input",
8651 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
8652 d61df03e Iustin Pop
      fname = self.cfg.ExpandInstanceName(self.op.name)
8653 d61df03e Iustin Pop
      if fname is None:
8654 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
8655 5c983ee5 Iustin Pop
                                   self.op.name, errors.ECODE_NOENT)
8656 d61df03e Iustin Pop
      self.op.name = fname
8657 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
8658 d61df03e Iustin Pop
    else:
8659 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
8660 5c983ee5 Iustin Pop
                                 self.op.mode, errors.ECODE_INVAL)
8661 d61df03e Iustin Pop
8662 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
8663 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
8664 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Missing allocator name",
8665 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
8666 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
8667 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
8668 5c983ee5 Iustin Pop
                                 self.op.direction, errors.ECODE_INVAL)
8669 d61df03e Iustin Pop
8670 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
8671 d61df03e Iustin Pop
    """Run the allocator test.
8672 d61df03e Iustin Pop

8673 d61df03e Iustin Pop
    """
8674 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
8675 923ddac0 Michael Hanselmann
      ial = IAllocator(self.cfg, self.rpc,
8676 29859cb7 Iustin Pop
                       mode=self.op.mode,
8677 29859cb7 Iustin Pop
                       name=self.op.name,
8678 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
8679 29859cb7 Iustin Pop
                       disks=self.op.disks,
8680 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
8681 29859cb7 Iustin Pop
                       os=self.op.os,
8682 29859cb7 Iustin Pop
                       tags=self.op.tags,
8683 29859cb7 Iustin Pop
                       nics=self.op.nics,
8684 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
8685 8cc7e742 Guido Trotter
                       hypervisor=self.op.hypervisor,
8686 29859cb7 Iustin Pop
                       )
8687 29859cb7 Iustin Pop
    else:
8688 923ddac0 Michael Hanselmann
      ial = IAllocator(self.cfg, self.rpc,
8689 29859cb7 Iustin Pop
                       mode=self.op.mode,
8690 29859cb7 Iustin Pop
                       name=self.op.name,
8691 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
8692 29859cb7 Iustin Pop
                       )
8693 d61df03e Iustin Pop
8694 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
8695 d1c2dd75 Iustin Pop
      result = ial.in_text
8696 298fe380 Iustin Pop
    else:
8697 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
8698 d1c2dd75 Iustin Pop
      result = ial.out_text
8699 298fe380 Iustin Pop
    return result