root / lib / cmdlib.py @ 88cd08aa


#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import time
import re
import platform
import logging
import copy

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo
    self.LogStep = processor.LogStep
    # support for dry-run
    self.dry_run_result = None

    # Tasklets
    self.tasklets = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separate is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have the 'GANETI_' prefix, as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes, an empty list (and not None) should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instances' nodes, or
    to just lock primary or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


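# Illustrative sketch only (hypothetical, not referenced anywhere): a minimal
# concurrent LU following the rules documented in LogicalUnit above - it
# declares no locks, has no prerequisites and performs no work.
class _ExampleNoopLU(LogicalUnit):
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    pass

  def Exec(self, feedback_fn):
    return None

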
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


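# Hypothetical sketch (not used anywhere): the smallest possible tasklet, and
# how an LU would wire it up - an LU that assigns self.tasklets in ExpandNames
# inherits working CheckPrereq and Exec implementations from LogicalUnit.
class _ExampleNoopTasklet(Tasklet):
  def CheckPrereq(self):
    pass

  def Exec(self, feedback_fn):
    pass

# In such an LU's ExpandNames one would then write, for instance:
#   self.tasklets = [_ExampleNoopTasklet(self)]

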
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))


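# Hypothetical usage sketch (field names invented for illustration): a query
# LU would typically validate self.op.output_fields in CheckArguments or
# ExpandNames with something like
#
#   _CheckOutputFields(static=utils.FieldSet("name", "pinst_cnt"),
#                      dynamic=utils.FieldSet("dtotal", "dfree"),
#                      selected=self.op.output_fields)
#
# which raises errors.OpPrereqError if any selected field is unknown.

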
def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env


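# Hypothetical example (names and values invented for illustration, never
# called): for a single-NIC, single-disk instance _BuildInstanceHookEnv
# produces keys such as INSTANCE_NAME, INSTANCE_NIC0_MAC, INSTANCE_NIC0_BRIDGE,
# INSTANCE_DISK0_SIZE and INSTANCE_BE_memory.
def _ExampleInstanceHookEnv():
  return _BuildInstanceHookEnv("inst1.example.com", "node1.example.com", [],
                               "debian-etch", True, "128", "1",
                               [(None, "aa:00:00:11:22:33",
                                 constants.NIC_MODE_BRIDGED, "xen-br0")],
                               constants.DT_PLAIN, [(10240, "rw")],
                               {constants.BE_MEMORY: 128}, {}, "xen-pvm")

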
def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max with one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


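# Worked example for _DecideSelfPromotion (hypothetical numbers): with
# candidate_pool_size = 5 and GetMasterCandidateStats returning mc_now = 3,
# mc_should = 3, the node being added gives mc_should = min(3 + 1, 5) = 4,
# and since 3 < 4 the function returns True (the node should promote itself).

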
def _CheckNicsBridgesExist(lu, target_nics, target_node,
                               profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
                for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os: L{objects.OS}
  @param os: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os.supported_variants:
    return
  try:
    variant = name.split("+", 1)[1]
  except IndexError:
    raise errors.OpPrereqError("OS name must include a variant")

  if variant not in os.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant")


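# For illustration (hypothetical names): OS variants are requested as
# "<os>+<variant>", e.g. "debootstrap+etch" selects variant "etch"; for an OS
# that declares supported_variants, _CheckOSVariant raises OpPrereqError when
# the variant part is missing or not in that list.

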
def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


class LUPostInitCluster(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def CheckPrereq(self):
    """No prerequisites to check.

    """
    return True

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUDestroyCluster(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except:
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
  REQ_BGL = False

  TCLUSTER = "cluster"
  TNODE = "node"
  TINSTANCE = "instance"

  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
906 7c874ee1 Iustin Pop
  ENODEDRBD = (TNODE, "ENODEDRBD")
907 7c874ee1 Iustin Pop
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
908 7c874ee1 Iustin Pop
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
909 7c874ee1 Iustin Pop
  ENODEHV = (TNODE, "ENODEHV")
910 7c874ee1 Iustin Pop
  ENODELVM = (TNODE, "ENODELVM")
911 7c874ee1 Iustin Pop
  ENODEN1 = (TNODE, "ENODEN1")
912 7c874ee1 Iustin Pop
  ENODENET = (TNODE, "ENODENET")
913 7c874ee1 Iustin Pop
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
914 7c874ee1 Iustin Pop
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
915 7c874ee1 Iustin Pop
  ENODERPC = (TNODE, "ENODERPC")
916 7c874ee1 Iustin Pop
  ENODESSH = (TNODE, "ENODESSH")
917 7c874ee1 Iustin Pop
  ENODEVERSION = (TNODE, "ENODEVERSION")
918 7c874ee1 Iustin Pop
919 a0c9776a Iustin Pop
  ETYPE_FIELD = "code"
920 a0c9776a Iustin Pop
  ETYPE_ERROR = "ERROR"
921 a0c9776a Iustin Pop
  ETYPE_WARNING = "WARNING"
922 a0c9776a Iustin Pop
923 d4b9d97f Guido Trotter
  def ExpandNames(self):
924 d4b9d97f Guido Trotter
    self.needed_locks = {
925 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
926 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
927 d4b9d97f Guido Trotter
    }
928 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
929 a8083063 Iustin Pop
930 7c874ee1 Iustin Pop
  def _Error(self, ecode, item, msg, *args, **kwargs):
931 7c874ee1 Iustin Pop
    """Format an error message.
932 7c874ee1 Iustin Pop

933 7c874ee1 Iustin Pop
    Based on the opcode's error_codes parameter, either format a
934 7c874ee1 Iustin Pop
    parseable error code, or a simpler error string.
935 7c874ee1 Iustin Pop

936 7c874ee1 Iustin Pop
    This must be called only from Exec and functions called from Exec.
937 7c874ee1 Iustin Pop

938 7c874ee1 Iustin Pop
    """
939 a0c9776a Iustin Pop
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
940 7c874ee1 Iustin Pop
    itype, etxt = ecode
941 7c874ee1 Iustin Pop
    # first complete the msg
942 7c874ee1 Iustin Pop
    if args:
943 7c874ee1 Iustin Pop
      msg = msg % args
944 7c874ee1 Iustin Pop
    # then format the whole message
945 7c874ee1 Iustin Pop
    if self.op.error_codes:
946 7c874ee1 Iustin Pop
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
947 7c874ee1 Iustin Pop
    else:
948 7c874ee1 Iustin Pop
      if item:
949 7c874ee1 Iustin Pop
        item = " " + item
950 7c874ee1 Iustin Pop
      else:
951 7c874ee1 Iustin Pop
        item = ""
952 7c874ee1 Iustin Pop
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
953 7c874ee1 Iustin Pop
    # and finally report it via the feedback_fn
954 7c874ee1 Iustin Pop
    self._feedback_fn("  - %s" % msg)
955 7c874ee1 Iustin Pop
956 a0c9776a Iustin Pop
  def _ErrorIf(self, cond, *args, **kwargs):
957 a0c9776a Iustin Pop
    """Log an error message if the passed condition is True.
958 a0c9776a Iustin Pop

959 a0c9776a Iustin Pop
    """
960 a0c9776a Iustin Pop
    cond = bool(cond) or self.op.debug_simulate_errors
961 a0c9776a Iustin Pop
    if cond:
962 a0c9776a Iustin Pop
      self._Error(*args, **kwargs)
963 a0c9776a Iustin Pop
    # do not mark the operation as failed for WARN cases only
964 a0c9776a Iustin Pop
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
965 a0c9776a Iustin Pop
      self.bad = self.bad or cond
966 a0c9776a Iustin Pop
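  # For illustration (hypothetical values): with the error_codes opcode
  # parameter set, self._Error(self.ENODELVM, "node1", "unable to check
  # volume groups") is reported via feedback_fn as
  #   - ERROR:ENODELVM:node:node1:unable to check volume groups
  # while without error_codes the same call is reported as
  #   - ERROR: node node1: unable to check volume groups
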
967 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
968 7c874ee1 Iustin Pop
                  node_result, master_files, drbd_map, vg_name):
969 a8083063 Iustin Pop
    """Run multiple tests against a node.
970 a8083063 Iustin Pop

971 112f18a5 Iustin Pop
    Test list:
972 e4376078 Iustin Pop

973 a8083063 Iustin Pop
      - compares ganeti version
974 5bbd3f7f Michael Hanselmann
      - checks vg existence and size > 20G
975 a8083063 Iustin Pop
      - checks config file checksum
976 a8083063 Iustin Pop
      - checks ssh to other nodes
977 a8083063 Iustin Pop

978 112f18a5 Iustin Pop
    @type nodeinfo: L{objects.Node}
979 112f18a5 Iustin Pop
    @param nodeinfo: the node to check
980 e4376078 Iustin Pop
    @param file_list: required list of files
981 e4376078 Iustin Pop
    @param local_cksum: dictionary of local files and their checksums
982 e4376078 Iustin Pop
    @param node_result: the results from the node
983 112f18a5 Iustin Pop
    @param master_files: list of files that only masters should have
984 6d2e83d5 Iustin Pop
    @param drbd_map: the used drbd minors for this node, in the
985 6d2e83d5 Iustin Pop
        form of minor: (instance, must_exist) which correspond to instances
986 6d2e83d5 Iustin Pop
        and their running status
987 cc9e1230 Guido Trotter
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
988 098c0958 Michael Hanselmann

989 a8083063 Iustin Pop
    """
990 112f18a5 Iustin Pop
    node = nodeinfo.name
991 a0c9776a Iustin Pop
    _ErrorIf = self._ErrorIf
992 25361b9a Iustin Pop
993 25361b9a Iustin Pop
    # main result, node_result should be a non-empty dict
994 a0c9776a Iustin Pop
    test = not node_result or not isinstance(node_result, dict)
995 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
996 7c874ee1 Iustin Pop
                  "unable to verify node: no data returned")
997 a0c9776a Iustin Pop
    if test:
998 a0c9776a Iustin Pop
      return
999 25361b9a Iustin Pop
1000 a8083063 Iustin Pop
    # compares ganeti version
1001 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
1002 25361b9a Iustin Pop
    remote_version = node_result.get('version', None)
1003 a0c9776a Iustin Pop
    test = not (remote_version and
1004 a0c9776a Iustin Pop
                isinstance(remote_version, (list, tuple)) and
1005 a0c9776a Iustin Pop
                len(remote_version) == 2)
1006 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1007 a0c9776a Iustin Pop
             "connection to node returned invalid data")
1008 a0c9776a Iustin Pop
    if test:
1009 a0c9776a Iustin Pop
      return
1010 a0c9776a Iustin Pop
1011 a0c9776a Iustin Pop
    test = local_version != remote_version[0]
1012 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEVERSION, node,
1013 a0c9776a Iustin Pop
             "incompatible protocol versions: master %s,"
1014 a0c9776a Iustin Pop
             " node %s", local_version, remote_version[0])
1015 a0c9776a Iustin Pop
    if test:
1016 a0c9776a Iustin Pop
      return
1017 a8083063 Iustin Pop
1018 e9ce0a64 Iustin Pop
    # node seems compatible, we can actually try to look into its results
1019 a8083063 Iustin Pop
1020 e9ce0a64 Iustin Pop
    # full package version
1021 a0c9776a Iustin Pop
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1022 a0c9776a Iustin Pop
                  self.ENODEVERSION, node,
1023 7c874ee1 Iustin Pop
                  "software version mismatch: master %s, node %s",
1024 7c874ee1 Iustin Pop
                  constants.RELEASE_VERSION, remote_version[1],
1025 a0c9776a Iustin Pop
                  code=self.ETYPE_WARNING)
1026 e9ce0a64 Iustin Pop
1027 e9ce0a64 Iustin Pop
    # checks vg existence and size > 20G
1028 cc9e1230 Guido Trotter
    if vg_name is not None:
1029 cc9e1230 Guido Trotter
      vglist = node_result.get(constants.NV_VGLIST, None)
1030 a0c9776a Iustin Pop
      test = not vglist
1031 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1032 a0c9776a Iustin Pop
      if not test:
1033 cc9e1230 Guido Trotter
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1034 cc9e1230 Guido Trotter
                                              constants.MIN_VG_SIZE)
1035 a0c9776a Iustin Pop
        _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1036 a8083063 Iustin Pop
1037 a8083063 Iustin Pop
    # checks config file checksum
1038 a8083063 Iustin Pop
1039 25361b9a Iustin Pop
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
1040 a0c9776a Iustin Pop
    test = not isinstance(remote_cksum, dict)
1041 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEFILECHECK, node,
1042 a0c9776a Iustin Pop
             "node hasn't returned file checksum data")
1043 a0c9776a Iustin Pop
    if not test:
1044 a8083063 Iustin Pop
      for file_name in file_list:
1045 112f18a5 Iustin Pop
        node_is_mc = nodeinfo.master_candidate
1046 a0c9776a Iustin Pop
        must_have = (file_name not in master_files) or node_is_mc
1047 a0c9776a Iustin Pop
        # missing
1048 a0c9776a Iustin Pop
        test1 = file_name not in remote_cksum
1049 a0c9776a Iustin Pop
        # invalid checksum
1050 a0c9776a Iustin Pop
        test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1051 a0c9776a Iustin Pop
        # existing and good
1052 a0c9776a Iustin Pop
        test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1053 a0c9776a Iustin Pop
        _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1054 a0c9776a Iustin Pop
                 "file '%s' missing", file_name)
1055 a0c9776a Iustin Pop
        _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1056 a0c9776a Iustin Pop
                 "file '%s' has wrong checksum", file_name)
1057 a0c9776a Iustin Pop
        # not candidate and this is not a must-have file
1058 a0c9776a Iustin Pop
        _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1059 a0c9776a Iustin Pop
                 "file '%s' should not exist on non master"
1060 a0c9776a Iustin Pop
                 " candidates (and the file is outdated)", file_name)
1061 a0c9776a Iustin Pop
        # all good, except non-master/non-must have combination
1062 a0c9776a Iustin Pop
        _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1063 a0c9776a Iustin Pop
                 "file '%s' should not exist"
1064 a0c9776a Iustin Pop
                 " on non master candidates", file_name)
1065 a8083063 Iustin Pop
1066 25361b9a Iustin Pop
    # checks ssh to any
1067 25361b9a Iustin Pop
1068 a0c9776a Iustin Pop
    test = constants.NV_NODELIST not in node_result
1069 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODESSH, node,
1070 a0c9776a Iustin Pop
             "node hasn't returned node ssh connectivity data")
1071 a0c9776a Iustin Pop
    if not test:
1072 25361b9a Iustin Pop
      if node_result[constants.NV_NODELIST]:
1073 7c874ee1 Iustin Pop
        for a_node, a_msg in node_result[constants.NV_NODELIST].items():
1074 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODESSH, node,
1075 a0c9776a Iustin Pop
                   "ssh communication with node '%s': %s", a_node, a_msg)
1076 25361b9a Iustin Pop
1077 a0c9776a Iustin Pop
    test = constants.NV_NODENETTEST not in node_result
1078 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODENET, node,
1079 a0c9776a Iustin Pop
             "node hasn't returned node tcp connectivity data")
1080 a0c9776a Iustin Pop
    if not test:
1081 25361b9a Iustin Pop
      if node_result[constants.NV_NODENETTEST]:
1082 25361b9a Iustin Pop
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
1083 7c874ee1 Iustin Pop
        for anode in nlist:
1084 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODENET, node,
1085 a0c9776a Iustin Pop
                   "tcp communication with node '%s': %s",
1086 a0c9776a Iustin Pop
                   anode, node_result[constants.NV_NODENETTEST][anode])
1087 9d4bfc96 Iustin Pop
1088 25361b9a Iustin Pop
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
1089 e69d05fd Iustin Pop
    if isinstance(hyp_result, dict):
1090 e69d05fd Iustin Pop
      for hv_name, hv_result in hyp_result.iteritems():
1091 a0c9776a Iustin Pop
        test = hv_result is not None
1092 a0c9776a Iustin Pop
        _ErrorIf(test, self.ENODEHV, node,
1093 a0c9776a Iustin Pop
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1094 6d2e83d5 Iustin Pop
1095 6d2e83d5 Iustin Pop
    # check used drbd list
1096 cc9e1230 Guido Trotter
    if vg_name is not None:
1097 cc9e1230 Guido Trotter
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
1098 a0c9776a Iustin Pop
      test = not isinstance(used_minors, (tuple, list))
1099 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1100 a0c9776a Iustin Pop
               "cannot parse drbd status file: %s", str(used_minors))
1101 a0c9776a Iustin Pop
      if not test:
1102 cc9e1230 Guido Trotter
        for minor, (iname, must_exist) in drbd_map.items():
1103 a0c9776a Iustin Pop
          test = minor not in used_minors and must_exist
1104 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODEDRBD, node,
1105 a0c9776a Iustin Pop
                   "drbd minor %d of instance %s is not active",
1106 a0c9776a Iustin Pop
                   minor, iname)
1107 cc9e1230 Guido Trotter
        for minor in used_minors:
1108 a0c9776a Iustin Pop
          test = minor not in drbd_map
1109 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODEDRBD, node,
1110 a0c9776a Iustin Pop
                   "unallocated drbd minor %d is in use", minor)
1111 a8083063 Iustin Pop
1112 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
1113 7c874ee1 Iustin Pop
                      node_instance, n_offline):
1114 a8083063 Iustin Pop
    """Verify an instance.
1115 a8083063 Iustin Pop

1116 a8083063 Iustin Pop
    This function checks to see if the required block devices are
1117 a8083063 Iustin Pop
    available on the instance's node.
1118 a8083063 Iustin Pop

1119 a8083063 Iustin Pop
    """
1120 a0c9776a Iustin Pop
    _ErrorIf = self._ErrorIf
1121 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
1122 a8083063 Iustin Pop
1123 a8083063 Iustin Pop
    node_vol_should = {}
1124 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
1125 a8083063 Iustin Pop
1126 a8083063 Iustin Pop
    for node in node_vol_should:
1127 0a66c968 Iustin Pop
      if node in n_offline:
1128 0a66c968 Iustin Pop
        # ignore missing volumes on offline nodes
1129 0a66c968 Iustin Pop
        continue
1130 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
1131 a0c9776a Iustin Pop
        test = node not in node_vol_is or volume not in node_vol_is[node]
1132 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1133 a0c9776a Iustin Pop
                 "volume %s missing on node %s", volume, node)
1134 a8083063 Iustin Pop
1135 0d68c45d Iustin Pop
    if instanceconfig.admin_up:
1136 a0c9776a Iustin Pop
      test = ((node_current not in node_instance or
1137 a0c9776a Iustin Pop
               instance not in node_instance[node_current]) and
1138 a0c9776a Iustin Pop
              node_current not in n_offline)
1139 a0c9776a Iustin Pop
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1140 a0c9776a Iustin Pop
               "instance not running on its primary node %s",
1141 a0c9776a Iustin Pop
               node_current)
1142 a8083063 Iustin Pop
1143 a8083063 Iustin Pop
    for node in node_instance:
1144 a8083063 Iustin Pop
      if node != node_current:
1145 a0c9776a Iustin Pop
        test = instance in node_instance[node]
1146 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1147 a0c9776a Iustin Pop
                 "instance should not run on node %s", node)
1148 a8083063 Iustin Pop
1149 7c874ee1 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is):
1150 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
1151 a8083063 Iustin Pop

1152 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
1153 a8083063 Iustin Pop
    reported as unknown.
1154 a8083063 Iustin Pop

1155 a8083063 Iustin Pop
    """
1156 a8083063 Iustin Pop
    for node in node_vol_is:
1157 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
1158 a0c9776a Iustin Pop
        test = (node not in node_vol_should or
1159 a0c9776a Iustin Pop
                volume not in node_vol_should[node])
1160 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1161 7c874ee1 Iustin Pop
                      "volume %s is unknown", volume)
1162 a8083063 Iustin Pop
1163 7c874ee1 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance):
1164 a8083063 Iustin Pop
    """Verify the list of running instances.
1165 a8083063 Iustin Pop

1166 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
1167 a8083063 Iustin Pop

1168 a8083063 Iustin Pop
    """
1169 a8083063 Iustin Pop
    for node in node_instance:
1170 7c874ee1 Iustin Pop
      for o_inst in node_instance[node]:
1171 a0c9776a Iustin Pop
        test = o_inst not in instancelist
1172 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1173 7c874ee1 Iustin Pop
                      "instance %s on node %s should not exist", o_inst, node)
1174 a8083063 Iustin Pop
1175 7c874ee1 Iustin Pop
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg):
1176 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
1177 2b3b6ddd Guido Trotter

1178 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
1179 2b3b6ddd Guido Trotter
    was primary for.
1180 2b3b6ddd Guido Trotter

1181 2b3b6ddd Guido Trotter
    """
1182 2b3b6ddd Guido Trotter
    for node, nodeinfo in node_info.iteritems():
1183 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
1184 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to, should a single
1185 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
1186 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
1187 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
1188 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
1189 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
1190 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
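      # Worked example (hypothetical): if this node is secondary for two
      # auto-balanced instances of 1024 and 2048 MB whose primary is the
      # same peer node, it needs at least 3072 MB of free memory to pass
      # the N+1 check for that peer.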
1191 2b3b6ddd Guido Trotter
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
1192 2b3b6ddd Guido Trotter
        needed_mem = 0
1193 2b3b6ddd Guido Trotter
        for instance in instances:
1194 338e51e8 Iustin Pop
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1195 c0f2b229 Iustin Pop
          if bep[constants.BE_AUTO_BALANCE]:
1196 3924700f Iustin Pop
            needed_mem += bep[constants.BE_MEMORY]
1197 a0c9776a Iustin Pop
        test = nodeinfo['mfree'] < needed_mem
1198 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEN1, node,
1199 7c874ee1 Iustin Pop
                      "not enough memory on to accommodate"
1200 7c874ee1 Iustin Pop
                      " failovers should peer node %s fail", prinode)
1201 2b3b6ddd Guido Trotter
1202 a8083063 Iustin Pop
  def CheckPrereq(self):
1203 a8083063 Iustin Pop
    """Check prerequisites.
1204 a8083063 Iustin Pop

1205 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
1206 e54c4c5e Guido Trotter
    all its members are valid.
1207 a8083063 Iustin Pop

1208 a8083063 Iustin Pop
    """
1209 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
1210 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
1211 e54c4c5e Guido Trotter
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
1212 a8083063 Iustin Pop
1213 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
1214 d8fff41c Guido Trotter
    """Build hooks env.
1215 d8fff41c Guido Trotter

1216 5bbd3f7f Michael Hanselmann
    Cluster-Verify hooks just ran in the post phase and their failure makes
1217 d8fff41c Guido Trotter
    the output be logged in the verify output and the verification to fail.
1218 d8fff41c Guido Trotter

1219 d8fff41c Guido Trotter
    """
1220 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
1221 35e994e9 Iustin Pop
    env = {
1222 35e994e9 Iustin Pop
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
1223 35e994e9 Iustin Pop
      }
1224 35e994e9 Iustin Pop
    for node in self.cfg.GetAllNodesInfo().values():
1225 35e994e9 Iustin Pop
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
1226 35e994e9 Iustin Pop
1227 d8fff41c Guido Trotter
    return env, [], all_nodes
1228 d8fff41c Guido Trotter
1229 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1230 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
1231 a8083063 Iustin Pop

1232 a8083063 Iustin Pop
    """
1233 a0c9776a Iustin Pop
    self.bad = False
1234 a0c9776a Iustin Pop
    _ErrorIf = self._ErrorIf
1235 7c874ee1 Iustin Pop
    verbose = self.op.verbose
1236 7c874ee1 Iustin Pop
    self._feedback_fn = feedback_fn
1237 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
1238 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
1239 a0c9776a Iustin Pop
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
1240 a8083063 Iustin Pop
1241 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
1242 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
1243 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
1244 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
1245 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
1246 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
1247 6d2e83d5 Iustin Pop
                        for iname in instancelist)
1248 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
1249 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
1250 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
1251 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
1252 a8083063 Iustin Pop
    node_volume = {}
1253 a8083063 Iustin Pop
    node_instance = {}
1254 9c9c7d30 Guido Trotter
    node_info = {}
1255 26b6af5e Guido Trotter
    instance_cfg = {}
1256 a8083063 Iustin Pop
1257 a8083063 Iustin Pop
    # FIXME: verify OS list
1258 a8083063 Iustin Pop
    # do local checksums
1259 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
1260 112f18a5 Iustin Pop
1261 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
1262 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
1263 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
1264 112f18a5 Iustin Pop
    file_names.extend(master_files)
1265 112f18a5 Iustin Pop
1266 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
1267 a8083063 Iustin Pop
1268 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1269 a8083063 Iustin Pop
    node_verify_param = {
1270 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
1271 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
1272 82e37788 Iustin Pop
                              if not node.offline],
1273 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1274 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1275 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1276 82e37788 Iustin Pop
                                 if not node.offline],
1277 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1278 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1279 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1280 a8083063 Iustin Pop
      }
1281 cc9e1230 Guido Trotter
    if vg_name is not None:
1282 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1283 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1284 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
1285 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1286 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1287 a8083063 Iustin Pop
1288 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1289 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1290 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1291 6d2e83d5 Iustin Pop
1292 7c874ee1 Iustin Pop
    feedback_fn("* Verifying node status")
1293 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1294 112f18a5 Iustin Pop
      node = node_i.name
1295 25361b9a Iustin Pop
1296 0a66c968 Iustin Pop
      if node_i.offline:
1297 7c874ee1 Iustin Pop
        if verbose:
1298 7c874ee1 Iustin Pop
          feedback_fn("* Skipping offline node %s" % (node,))
1299 0a66c968 Iustin Pop
        n_offline.append(node)
1300 0a66c968 Iustin Pop
        continue
1301 0a66c968 Iustin Pop
1302 112f18a5 Iustin Pop
      if node == master_node:
1303 25361b9a Iustin Pop
        ntype = "master"
1304 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1305 25361b9a Iustin Pop
        ntype = "master candidate"
1306 22f0f71d Iustin Pop
      elif node_i.drained:
1307 22f0f71d Iustin Pop
        ntype = "drained"
1308 22f0f71d Iustin Pop
        n_drained.append(node)
1309 112f18a5 Iustin Pop
      else:
1310 25361b9a Iustin Pop
        ntype = "regular"
1311 7c874ee1 Iustin Pop
      if verbose:
1312 7c874ee1 Iustin Pop
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1313 25361b9a Iustin Pop
1314 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1315 a0c9776a Iustin Pop
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
1316 6f68a739 Iustin Pop
      if msg:
1317 25361b9a Iustin Pop
        continue
1318 25361b9a Iustin Pop
1319 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1320 6d2e83d5 Iustin Pop
      node_drbd = {}
1321 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1322 a0c9776a Iustin Pop
        test = instance not in instanceinfo
1323 a0c9776a Iustin Pop
        _ErrorIf(test, self.ECLUSTERCFG, None,
1324 a0c9776a Iustin Pop
                 "ghost instance '%s' in temporary DRBD map", instance)
1325 c614e5fb Iustin Pop
        # ghost instance should not be running, but otherwise we
1326 c614e5fb Iustin Pop
        # don't give double warnings (both ghost instance and
1327 c614e5fb Iustin Pop
        # unallocated minor in use)
1328 a0c9776a Iustin Pop
        if test:
1329 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1330 c614e5fb Iustin Pop
        else:
1331 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1332 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
1333 a0c9776a Iustin Pop
      self._VerifyNode(node_i, file_names, local_checksums,
1334 a0c9776a Iustin Pop
                       nresult, master_files, node_drbd, vg_name)
1335 a8083063 Iustin Pop
1336 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1337 cc9e1230 Guido Trotter
      if vg_name is None:
1338 cc9e1230 Guido Trotter
        node_volume[node] = {}
1339 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1340 a0c9776a Iustin Pop
        _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1341 a0c9776a Iustin Pop
                 utils.SafeEncode(lvdata))
1342 b63ed789 Iustin Pop
        node_volume[node] = {}
1343 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1344 a0c9776a Iustin Pop
        _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1345 a8083063 Iustin Pop
        continue
1346 b63ed789 Iustin Pop
      else:
1347 25361b9a Iustin Pop
        node_volume[node] = lvdata
1348 a8083063 Iustin Pop
1349 a8083063 Iustin Pop
      # node_instance
1350 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1351 a0c9776a Iustin Pop
      test = not isinstance(idata, list)
1352 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODEHV, node,
1353 a0c9776a Iustin Pop
               "rpc call to node failed (instancelist)")
1354 a0c9776a Iustin Pop
      if test:
1355 a8083063 Iustin Pop
        continue
1356 a8083063 Iustin Pop
1357 25361b9a Iustin Pop
      node_instance[node] = idata
1358 a8083063 Iustin Pop
1359 9c9c7d30 Guido Trotter
      # node_info
1360 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1361 a0c9776a Iustin Pop
      test = not isinstance(nodeinfo, dict)
1362 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1363 a0c9776a Iustin Pop
      if test:
1364 9c9c7d30 Guido Trotter
        continue
1365 9c9c7d30 Guido Trotter
1366 9c9c7d30 Guido Trotter
      try:
1367 9c9c7d30 Guido Trotter
        node_info[node] = {
1368 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1369 93e4c50b Guido Trotter
          "pinst": [],
1370 93e4c50b Guido Trotter
          "sinst": [],
1371 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1372 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1373 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1374 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1375 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1376 36e7da50 Guido Trotter
          # secondary.
1377 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1378 9c9c7d30 Guido Trotter
        }
1379 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1380 cc9e1230 Guido Trotter
        if vg_name is not None:
1381 a0c9776a Iustin Pop
          test = (constants.NV_VGLIST not in nresult or
1382 a0c9776a Iustin Pop
                  vg_name not in nresult[constants.NV_VGLIST])
1383 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODELVM, node,
1384 a0c9776a Iustin Pop
                   "node didn't return data for the volume group '%s'"
1385 a0c9776a Iustin Pop
                   " - it is either missing or broken", vg_name)
1386 a0c9776a Iustin Pop
          if test:
1387 9a198532 Iustin Pop
            continue
1388 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1389 9a198532 Iustin Pop
      except (ValueError, KeyError):
1390 a0c9776a Iustin Pop
        _ErrorIf(True, self.ENODERPC, node,
1391 a0c9776a Iustin Pop
                 "node returned invalid nodeinfo, check lvm/hypervisor")
1392 9c9c7d30 Guido Trotter
        continue
1393 9c9c7d30 Guido Trotter
1394 a8083063 Iustin Pop
    node_vol_should = {}
1395 a8083063 Iustin Pop
1396 7c874ee1 Iustin Pop
    feedback_fn("* Verifying instance status")
1397 a8083063 Iustin Pop
    for instance in instancelist:
1398 7c874ee1 Iustin Pop
      if verbose:
1399 7c874ee1 Iustin Pop
        feedback_fn("* Verifying instance %s" % instance)
1400 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1401 a0c9776a Iustin Pop
      self._VerifyInstance(instance, inst_config, node_volume,
1402 a0c9776a Iustin Pop
                           node_instance, n_offline)
1403 832261fd Iustin Pop
      inst_nodes_offline = []
1404 a8083063 Iustin Pop
1405 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1406 a8083063 Iustin Pop
1407 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1408 26b6af5e Guido Trotter
1409 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1410 a0c9776a Iustin Pop
      _ErrorIf(pnode not in node_info and pnode not in n_offline,
1411 a0c9776a Iustin Pop
               self.ENODERPC, pnode, "instance %s, connection to"
1412 a0c9776a Iustin Pop
               " primary node failed", instance)
1413 93e4c50b Guido Trotter
      if pnode in node_info:
1414 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1415 93e4c50b Guido Trotter
1416 832261fd Iustin Pop
      if pnode in n_offline:
1417 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1418 832261fd Iustin Pop
1419 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1420 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1421 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1422 93e4c50b Guido Trotter
      # supported either.
1423 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1424 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1425 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1426 a0c9776a Iustin Pop
      _ErrorIf(len(inst_config.secondary_nodes) > 1,
1427 a0c9776a Iustin Pop
               self.EINSTANCELAYOUT, instance,
1428 a0c9776a Iustin Pop
               "instance has multiple secondary nodes", code="WARNING")
1429 93e4c50b Guido Trotter
1430 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1431 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1432 3924700f Iustin Pop
1433 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1434 a0c9776a Iustin Pop
        _ErrorIf(snode not in node_info and snode not in n_offline,
1435 a0c9776a Iustin Pop
                 self.ENODERPC, snode,
1436 a0c9776a Iustin Pop
                 "instance %s, connection to secondary node"
1437 a0c9776a Iustin Pop
                 "failed", instance)
1438 a0c9776a Iustin Pop
1439 93e4c50b Guido Trotter
        if snode in node_info:
1440 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1441 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1442 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1443 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1444 a0c9776a Iustin Pop
1445 832261fd Iustin Pop
        if snode in n_offline:
1446 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1447 832261fd Iustin Pop
1448 a0c9776a Iustin Pop
      # warn that the instance lives on offline nodes
1449 a0c9776a Iustin Pop
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
1450 a0c9776a Iustin Pop
               "instance lives on offline node(s) %s",
1451 a0c9776a Iustin Pop
               ", ".join(inst_nodes_offline))
1452 93e4c50b Guido Trotter
1453 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1454 a0c9776a Iustin Pop
    self._VerifyOrphanVolumes(node_vol_should, node_volume)
1455 a8083063 Iustin Pop
1456 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1457 a0c9776a Iustin Pop
    self._VerifyOrphanInstances(instancelist, node_instance)
1458 a8083063 Iustin Pop
1459 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1460 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1461 a0c9776a Iustin Pop
      self._VerifyNPlusOneMemory(node_info, instance_cfg)
1462 2b3b6ddd Guido Trotter
1463 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1464 2b3b6ddd Guido Trotter
    if i_non_redundant:
1465 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1466 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1467 2b3b6ddd Guido Trotter
1468 3924700f Iustin Pop
    if i_non_a_balanced:
1469 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1470 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1471 3924700f Iustin Pop
1472 0a66c968 Iustin Pop
    if n_offline:
1473 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1474 0a66c968 Iustin Pop
1475 22f0f71d Iustin Pop
    if n_drained:
1476 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1477 22f0f71d Iustin Pop
1478 a0c9776a Iustin Pop
    return not self.bad
1479 a8083063 Iustin Pop
1480 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1481 5bbd3f7f Michael Hanselmann
    """Analyze the post-hooks' result
1482 e4376078 Iustin Pop

1483 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
1484 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
1485 d8fff41c Guido Trotter

1486 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1487 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1488 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
1489 e4376078 Iustin Pop
    @param feedback_fn: function used send feedback back to the caller
1490 e4376078 Iustin Pop
    @param lu_result: previous Exec result
1491 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
1492 e4376078 Iustin Pop
        and hook results
1493 d8fff41c Guido Trotter

1494 d8fff41c Guido Trotter
    """
1495 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
1496 38206f3c Iustin Pop
    # their results
1497 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
1498 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
1499 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
1500 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
1501 7c874ee1 Iustin Pop
      assert hooks_results, "invalid result from hooks"
1502 7c874ee1 Iustin Pop
1503 7c874ee1 Iustin Pop
      for node_name in hooks_results:
1504 7c874ee1 Iustin Pop
        show_node_header = True
1505 7c874ee1 Iustin Pop
        res = hooks_results[node_name]
1506 7c874ee1 Iustin Pop
        msg = res.fail_msg
1507 a0c9776a Iustin Pop
        test = msg and not res.offline
1508 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
1509 7c874ee1 Iustin Pop
                      "Communication failure in hooks execution: %s", msg)
1510 a0c9776a Iustin Pop
        if test:
1511 a0c9776a Iustin Pop
          # override manually lu_result here as _ErrorIf only
1512 a0c9776a Iustin Pop
          # overrides self.bad
1513 7c874ee1 Iustin Pop
          lu_result = 1
1514 7c874ee1 Iustin Pop
          continue
1515 7c874ee1 Iustin Pop
        for script, hkr, output in res.payload:
1516 a0c9776a Iustin Pop
          test = hkr == constants.HKR_FAIL
1517 a0c9776a Iustin Pop
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
1518 7c874ee1 Iustin Pop
                        "Script %s failed, output:", script)
1519 a0c9776a Iustin Pop
          if test:
1520 7c874ee1 Iustin Pop
            output = indent_re.sub('      ', output)
1521 7c874ee1 Iustin Pop
            feedback_fn("%s" % output)
1522 7c874ee1 Iustin Pop
            lu_result = 1
1523 d8fff41c Guido Trotter
1524 d8fff41c Guido Trotter
      return lu_result
1525 d8fff41c Guido Trotter
1526 a8083063 Iustin Pop
1527 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1528 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1529 2c95a8d4 Iustin Pop

1530 2c95a8d4 Iustin Pop
  """
1531 2c95a8d4 Iustin Pop
  _OP_REQP = []
1532 d4b9d97f Guido Trotter
  REQ_BGL = False
1533 d4b9d97f Guido Trotter
1534 d4b9d97f Guido Trotter
  def ExpandNames(self):
1535 d4b9d97f Guido Trotter
    self.needed_locks = {
1536 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1537 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1538 d4b9d97f Guido Trotter
    }
1539 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1540 2c95a8d4 Iustin Pop
1541 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1542 2c95a8d4 Iustin Pop
    """Check prerequisites.
1543 2c95a8d4 Iustin Pop

1544 2c95a8d4 Iustin Pop
    This has no prerequisites.
1545 2c95a8d4 Iustin Pop

1546 2c95a8d4 Iustin Pop
    """
1547 2c95a8d4 Iustin Pop
    pass
1548 2c95a8d4 Iustin Pop
1549 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1550 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1551 2c95a8d4 Iustin Pop

1552 29d376ec Iustin Pop
    @rtype: tuple of three items
1553 29d376ec Iustin Pop
    @return: a tuple of (dict of node-to-node_error, list of instances
1554 29d376ec Iustin Pop
        which need activate-disks, dict of instance: (node, volume) for
1555 29d376ec Iustin Pop
        missing volumes)
1556 29d376ec Iustin Pop

1557 2c95a8d4 Iustin Pop
    """
1558 29d376ec Iustin Pop
    result = res_nodes, res_instances, res_missing = {}, [], {}
1559 2c95a8d4 Iustin Pop
1560 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1561 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1562 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1563 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1564 2c95a8d4 Iustin Pop
1565 2c95a8d4 Iustin Pop
    nv_dict = {}
1566 2c95a8d4 Iustin Pop
    for inst in instances:
1567 2c95a8d4 Iustin Pop
      inst_lvs = {}
1568 0d68c45d Iustin Pop
      if (not inst.admin_up or
1569 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1570 2c95a8d4 Iustin Pop
        continue
1571 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1572 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1573 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1574 2c95a8d4 Iustin Pop
        for vol in vol_list:
1575 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1576 2c95a8d4 Iustin Pop
1577 2c95a8d4 Iustin Pop
    if not nv_dict:
1578 2c95a8d4 Iustin Pop
      return result
1579 2c95a8d4 Iustin Pop
1580 b2a6ccd4 Iustin Pop
    node_lvs = self.rpc.call_lv_list(nodes, vg_name)
1581 2c95a8d4 Iustin Pop
1582 2c95a8d4 Iustin Pop
    for node in nodes:
1583 2c95a8d4 Iustin Pop
      # node_volume
1584 29d376ec Iustin Pop
      node_res = node_lvs[node]
1585 29d376ec Iustin Pop
      if node_res.offline:
1586 ea9ddc07 Iustin Pop
        continue
1587 4c4e4e1e Iustin Pop
      msg = node_res.fail_msg
1588 29d376ec Iustin Pop
      if msg:
1589 29d376ec Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
1590 29d376ec Iustin Pop
        res_nodes[node] = msg
1591 2c95a8d4 Iustin Pop
        continue
1592 2c95a8d4 Iustin Pop
1593 29d376ec Iustin Pop
      lvs = node_res.payload
1594 29d376ec Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.items():
1595 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1596 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1597 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1598 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1599 2c95a8d4 Iustin Pop
1600 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1601 b63ed789 Iustin Pop
    # data better
1602 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1603 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1604 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1605 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1606 b63ed789 Iustin Pop
1607 2c95a8d4 Iustin Pop
    return result
1608 2c95a8d4 Iustin Pop
1609 2c95a8d4 Iustin Pop
1610 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
1611 60975797 Iustin Pop
  """Verifies the cluster disks sizes.
1612 60975797 Iustin Pop

1613 60975797 Iustin Pop
  """
1614 60975797 Iustin Pop
  _OP_REQP = ["instances"]
1615 60975797 Iustin Pop
  REQ_BGL = False
1616 60975797 Iustin Pop
1617 60975797 Iustin Pop
  def ExpandNames(self):
1618 60975797 Iustin Pop
    if not isinstance(self.op.instances, list):
1619 60975797 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'")
1620 60975797 Iustin Pop
1621 60975797 Iustin Pop
    if self.op.instances:
1622 60975797 Iustin Pop
      self.wanted_names = []
1623 60975797 Iustin Pop
      for name in self.op.instances:
1624 60975797 Iustin Pop
        full_name = self.cfg.ExpandInstanceName(name)
1625 60975797 Iustin Pop
        if full_name is None:
1626 60975797 Iustin Pop
          raise errors.OpPrereqError("Instance '%s' not known" % name)
1627 60975797 Iustin Pop
        self.wanted_names.append(full_name)
1628 60975797 Iustin Pop
      self.needed_locks = {
1629 60975797 Iustin Pop
        locking.LEVEL_NODE: [],
1630 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: self.wanted_names,
1631 60975797 Iustin Pop
        }
1632 60975797 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1633 60975797 Iustin Pop
    else:
1634 60975797 Iustin Pop
      self.wanted_names = None
1635 60975797 Iustin Pop
      self.needed_locks = {
1636 60975797 Iustin Pop
        locking.LEVEL_NODE: locking.ALL_SET,
1637 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: locking.ALL_SET,
1638 60975797 Iustin Pop
        }
1639 60975797 Iustin Pop
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1640 60975797 Iustin Pop
1641 60975797 Iustin Pop
  def DeclareLocks(self, level):
1642 60975797 Iustin Pop
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
1643 60975797 Iustin Pop
      self._LockInstancesNodes(primary_only=True)
1644 60975797 Iustin Pop
1645 60975797 Iustin Pop
  def CheckPrereq(self):
1646 60975797 Iustin Pop
    """Check prerequisites.
1647 60975797 Iustin Pop

1648 60975797 Iustin Pop
    This only checks the optional instance list against the existing names.
1649 60975797 Iustin Pop

1650 60975797 Iustin Pop
    """
1651 60975797 Iustin Pop
    if self.wanted_names is None:
1652 60975797 Iustin Pop
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
1653 60975797 Iustin Pop
1654 60975797 Iustin Pop
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
1655 60975797 Iustin Pop
                             in self.wanted_names]
1656 60975797 Iustin Pop
1657 b775c337 Iustin Pop
  def _EnsureChildSizes(self, disk):
1658 b775c337 Iustin Pop
    """Ensure children of the disk have the needed disk size.
1659 b775c337 Iustin Pop

1660 b775c337 Iustin Pop
    This is valid mainly for DRBD8 and fixes an issue where the
1661 b775c337 Iustin Pop
    children have smaller disk size.
1662 b775c337 Iustin Pop

1663 b775c337 Iustin Pop
    @param disk: an L{ganeti.objects.Disk} object
1664 b775c337 Iustin Pop

1665 b775c337 Iustin Pop
    """
1666 b775c337 Iustin Pop
    if disk.dev_type == constants.LD_DRBD8:
1667 b775c337 Iustin Pop
      assert disk.children, "Empty children for DRBD8?"
1668 b775c337 Iustin Pop
      fchild = disk.children[0]
1669 b775c337 Iustin Pop
      mismatch = fchild.size < disk.size
1670 b775c337 Iustin Pop
      if mismatch:
1671 b775c337 Iustin Pop
        self.LogInfo("Child disk has size %d, parent %d, fixing",
1672 b775c337 Iustin Pop
                     fchild.size, disk.size)
1673 b775c337 Iustin Pop
        fchild.size = disk.size
1674 b775c337 Iustin Pop
1675 b775c337 Iustin Pop
      # and we recurse on this child only, not on the metadev
1676 b775c337 Iustin Pop
      return self._EnsureChildSizes(fchild) or mismatch
1677 b775c337 Iustin Pop
    else:
1678 b775c337 Iustin Pop
      return False
1679 b775c337 Iustin Pop
1680 60975797 Iustin Pop
  def Exec(self, feedback_fn):
1681 60975797 Iustin Pop
    """Verify the size of cluster disks.
1682 60975797 Iustin Pop

1683 60975797 Iustin Pop
    """
1684 60975797 Iustin Pop
    # TODO: check child disks too
1685 60975797 Iustin Pop
    # TODO: check differences in size between primary/secondary nodes
1686 60975797 Iustin Pop
    per_node_disks = {}
1687 60975797 Iustin Pop
    for instance in self.wanted_instances:
1688 60975797 Iustin Pop
      pnode = instance.primary_node
1689 60975797 Iustin Pop
      if pnode not in per_node_disks:
1690 60975797 Iustin Pop
        per_node_disks[pnode] = []
1691 60975797 Iustin Pop
      for idx, disk in enumerate(instance.disks):
1692 60975797 Iustin Pop
        per_node_disks[pnode].append((instance, idx, disk))
1693 60975797 Iustin Pop
1694 60975797 Iustin Pop
    changed = []
1695 60975797 Iustin Pop
    for node, dskl in per_node_disks.items():
1696 4d9e6835 Iustin Pop
      newl = [v[2].Copy() for v in dskl]
1697 4d9e6835 Iustin Pop
      for dsk in newl:
1698 4d9e6835 Iustin Pop
        self.cfg.SetDiskID(dsk, node)
1699 4d9e6835 Iustin Pop
      result = self.rpc.call_blockdev_getsizes(node, newl)
1700 3cebe102 Michael Hanselmann
      if result.fail_msg:
1701 60975797 Iustin Pop
        self.LogWarning("Failure in blockdev_getsizes call to node"
1702 60975797 Iustin Pop
                        " %s, ignoring", node)
1703 60975797 Iustin Pop
        continue
1704 60975797 Iustin Pop
      if len(result.data) != len(dskl):
1705 60975797 Iustin Pop
        self.LogWarning("Invalid result from node %s, ignoring node results",
1706 60975797 Iustin Pop
                        node)
1707 60975797 Iustin Pop
        continue
1708 60975797 Iustin Pop
      for ((instance, idx, disk), size) in zip(dskl, result.data):
1709 60975797 Iustin Pop
        if size is None:
1710 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return size"
1711 60975797 Iustin Pop
                          " information, ignoring", idx, instance.name)
1712 60975797 Iustin Pop
          continue
1713 60975797 Iustin Pop
        if not isinstance(size, (int, long)):
1714 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return valid"
1715 60975797 Iustin Pop
                          " size information, ignoring", idx, instance.name)
1716 60975797 Iustin Pop
          continue
1717 60975797 Iustin Pop
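        # blockdev_getsizes returns sizes in bytes, while disk.size is
        # recorded in MiB; shift by 20 bits to convert before comparing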
        size = size >> 20
1718 60975797 Iustin Pop
        if size != disk.size:
1719 60975797 Iustin Pop
          self.LogInfo("Disk %d of instance %s has mismatched size,"
1720 60975797 Iustin Pop
                       " correcting: recorded %d, actual %d", idx,
1721 60975797 Iustin Pop
                       instance.name, disk.size, size)
1722 60975797 Iustin Pop
          disk.size = size
1723 60975797 Iustin Pop
          self.cfg.Update(instance)
1724 60975797 Iustin Pop
          changed.append((instance.name, idx, size))
1725 b775c337 Iustin Pop
        if self._EnsureChildSizes(disk):
1726 b775c337 Iustin Pop
          self.cfg.Update(instance)
1727 b775c337 Iustin Pop
          changed.append((instance.name, idx, disk.size))
1728 60975797 Iustin Pop
    return changed
1729 60975797 Iustin Pop
1730 60975797 Iustin Pop
1731 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1732 07bd8a51 Iustin Pop
  """Rename the cluster.
1733 07bd8a51 Iustin Pop

1734 07bd8a51 Iustin Pop
  """
1735 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1736 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1737 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1738 07bd8a51 Iustin Pop
1739 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1740 07bd8a51 Iustin Pop
    """Build hooks env.
1741 07bd8a51 Iustin Pop

1742 07bd8a51 Iustin Pop
    """
1743 07bd8a51 Iustin Pop
    env = {
1744 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1745 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1746 07bd8a51 Iustin Pop
      }
1747 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1748 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1749 07bd8a51 Iustin Pop
1750 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1751 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1752 07bd8a51 Iustin Pop

1753 07bd8a51 Iustin Pop
    """
1754 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1755 07bd8a51 Iustin Pop
1756 bcf043c9 Iustin Pop
    new_name = hostname.name
1757 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1758 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
1759 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
1760 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1761 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1762 07bd8a51 Iustin Pop
                                 " cluster has changed")
1763 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1764 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1765 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1766 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1767 07bd8a51 Iustin Pop
                                   new_ip)
1768 07bd8a51 Iustin Pop
1769 07bd8a51 Iustin Pop
    self.op.name = new_name
1770 07bd8a51 Iustin Pop
1771 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1772 07bd8a51 Iustin Pop
    """Rename the cluster.
1773 07bd8a51 Iustin Pop

1774 07bd8a51 Iustin Pop
    """
1775 07bd8a51 Iustin Pop
    clustername = self.op.name
1776 07bd8a51 Iustin Pop
    ip = self.ip
1777 07bd8a51 Iustin Pop
1778 07bd8a51 Iustin Pop
    # shutdown the master IP
1779 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
1780 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
1781 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
1782 07bd8a51 Iustin Pop
1783 07bd8a51 Iustin Pop
    try:
1784 55cf7d83 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
1785 55cf7d83 Iustin Pop
      cluster.cluster_name = clustername
1786 55cf7d83 Iustin Pop
      cluster.master_ip = ip
1787 55cf7d83 Iustin Pop
      self.cfg.Update(cluster)
1788 ec85e3d5 Iustin Pop
1789 ec85e3d5 Iustin Pop
      # update the known hosts file
1790 ec85e3d5 Iustin Pop
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
1791 ec85e3d5 Iustin Pop
      node_list = self.cfg.GetNodeList()
1792 ec85e3d5 Iustin Pop
      try:
1793 ec85e3d5 Iustin Pop
        node_list.remove(master)
1794 ec85e3d5 Iustin Pop
      except ValueError:
1795 ec85e3d5 Iustin Pop
        pass
1796 ec85e3d5 Iustin Pop
      result = self.rpc.call_upload_file(node_list,
1797 ec85e3d5 Iustin Pop
                                         constants.SSH_KNOWN_HOSTS_FILE)
1798 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
1799 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
1800 6f7d4e75 Iustin Pop
        if msg:
1801 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
1802 6f7d4e75 Iustin Pop
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
1803 6f7d4e75 Iustin Pop
          self.proc.LogWarning(msg)
1804 ec85e3d5 Iustin Pop
1805 07bd8a51 Iustin Pop
    finally:
1806 3583908a Guido Trotter
      result = self.rpc.call_node_start_master(master, False, False)
1807 4c4e4e1e Iustin Pop
      msg = result.fail_msg
1808 b726aff0 Iustin Pop
      if msg:
1809 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
1810 b726aff0 Iustin Pop
                        " the master, please restart manually: %s", msg)
1811 07bd8a51 Iustin Pop
1812 07bd8a51 Iustin Pop
1813 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1814 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1815 8084f9f6 Manuel Franceschini

1816 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
1817 e4376078 Iustin Pop
  @param disk: the disk to check
1818 5bbd3f7f Michael Hanselmann
  @rtype: boolean
1819 e4376078 Iustin Pop
  @return: boolean indicating whether a LD_LV dev_type was found or not
1820 8084f9f6 Manuel Franceschini

1821 8084f9f6 Manuel Franceschini
  """
1822 8084f9f6 Manuel Franceschini
  if disk.children:
1823 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1824 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1825 8084f9f6 Manuel Franceschini
        return True
1826 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
1827 8084f9f6 Manuel Franceschini
1828 8084f9f6 Manuel Franceschini
1829 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1830 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1831 8084f9f6 Manuel Franceschini

1832 8084f9f6 Manuel Franceschini
  """
1833 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1834 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1835 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1836 c53279cf Guido Trotter
  REQ_BGL = False
1837 c53279cf Guido Trotter
1838 3994f455 Iustin Pop
  def CheckArguments(self):
1839 4b7735f9 Iustin Pop
    """Check parameters
1840 4b7735f9 Iustin Pop

1841 4b7735f9 Iustin Pop
    """
1842 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
1843 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
1844 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1845 4b7735f9 Iustin Pop
      try:
1846 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1847 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
1848 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1849 4b7735f9 Iustin Pop
                                   str(err))
1850 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
1851 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed")
1852 4b7735f9 Iustin Pop
1853 c53279cf Guido Trotter
  def ExpandNames(self):
1854 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
1855 c53279cf Guido Trotter
    # all nodes to be modified.
1856 c53279cf Guido Trotter
    self.needed_locks = {
1857 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1858 c53279cf Guido Trotter
    }
1859 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1860 8084f9f6 Manuel Franceschini
1861 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1862 8084f9f6 Manuel Franceschini
    """Build hooks env.
1863 8084f9f6 Manuel Franceschini

1864 8084f9f6 Manuel Franceschini
    """
1865 8084f9f6 Manuel Franceschini
    env = {
1866 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1867 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1868 8084f9f6 Manuel Franceschini
      }
1869 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1870 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1871 8084f9f6 Manuel Franceschini
1872 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1873 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1874 8084f9f6 Manuel Franceschini

1875 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1876 5f83e263 Iustin Pop
    if the given volume group is valid.
1877 8084f9f6 Manuel Franceschini

1878 8084f9f6 Manuel Franceschini
    """
1879 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
1880 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1881 8084f9f6 Manuel Franceschini
      for inst in instances:
1882 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1883 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1884 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1885 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1886 8084f9f6 Manuel Franceschini
1887 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1888 779c15bb Iustin Pop
1889 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1890 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1891 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
1892 8084f9f6 Manuel Franceschini
      for node in node_list:
1893 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
1894 e480923b Iustin Pop
        if msg:
1895 781de953 Iustin Pop
          # ignoring down node
1896 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
1897 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
1898 781de953 Iustin Pop
          continue
1899 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
1900 781de953 Iustin Pop
                                              self.op.vg_name,
1901 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1902 8084f9f6 Manuel Franceschini
        if vgstatus:
1903 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1904 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1905 8084f9f6 Manuel Franceschini
1906 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
1907 5af3da74 Guido Trotter
    # validate params changes
1908 779c15bb Iustin Pop
    if self.op.beparams:
1909 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
1910 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
1911 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
1912 779c15bb Iustin Pop
1913 5af3da74 Guido Trotter
    if self.op.nicparams:
1914 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
1915 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
1916 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
1917 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
1918 5af3da74 Guido Trotter
1919 779c15bb Iustin Pop
    # hypervisor list/parameters
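    # build the new hypervisor parameters as a copy of the current ones,
    # with any per-hypervisor overrides from the opcode merged in below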
1920 abe609b2 Guido Trotter
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
1921 779c15bb Iustin Pop
    if self.op.hvparams:
1922 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
1923 779c15bb Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1924 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
1925 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
1926 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
1927 779c15bb Iustin Pop
        else:
1928 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
1929 779c15bb Iustin Pop
1930 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1931 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
1932 b119bccb Guido Trotter
      if not self.hv_list:
1933 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
1934 b119bccb Guido Trotter
                                   " least one member")
1935 b119bccb Guido Trotter
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
1936 b119bccb Guido Trotter
      if invalid_hvs:
1937 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
1938 6915bc28 Guido Trotter
                                   " entries: %s" % ", ".join(invalid_hvs))
1939 779c15bb Iustin Pop
    else:
1940 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
1941 779c15bb Iustin Pop
1942 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1943 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
1944 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
1945 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1946 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
1947 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
1948 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
1949 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
1950 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1951 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
1952 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
1953 779c15bb Iustin Pop
1954 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1955 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1956 8084f9f6 Manuel Franceschini

1957 8084f9f6 Manuel Franceschini
    """
1958 779c15bb Iustin Pop
    if self.op.vg_name is not None:
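      # an empty vg_name means "disable LVM storage"; it is stored as None
      # in the cluster configuration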
1959 b2482333 Guido Trotter
      new_volume = self.op.vg_name
1960 b2482333 Guido Trotter
      if not new_volume:
1961 b2482333 Guido Trotter
        new_volume = None
1962 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
1963 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
1964 779c15bb Iustin Pop
      else:
1965 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
1966 779c15bb Iustin Pop
                    " state, not changing")
1967 779c15bb Iustin Pop
    if self.op.hvparams:
1968 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
1969 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1970 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1971 779c15bb Iustin Pop
    if self.op.beparams:
1972 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
1973 5af3da74 Guido Trotter
    if self.op.nicparams:
1974 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
1975 5af3da74 Guido Trotter
1976 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1977 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1978 75e914fb Iustin Pop
      # we need to update the pool size here, otherwise the save will fail
1979 44485f49 Guido Trotter
      _AdjustCandidatePool(self, [])
1980 4b7735f9 Iustin Pop
1981 779c15bb Iustin Pop
    self.cfg.Update(self.cluster)
1982 8084f9f6 Manuel Franceschini
1983 8084f9f6 Manuel Franceschini
1984 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
1985 28eddce5 Guido Trotter
  """Distribute additional files which are part of the cluster configuration.
1986 28eddce5 Guido Trotter

1987 28eddce5 Guido Trotter
  ConfigWriter takes care of distributing the config and ssconf files, but
1988 28eddce5 Guido Trotter
  there are more files which should be distributed to all nodes. This function
1989 28eddce5 Guido Trotter
  makes sure those are copied.
1990 28eddce5 Guido Trotter

1991 28eddce5 Guido Trotter
  @param lu: calling logical unit
1992 28eddce5 Guido Trotter
  @param additional_nodes: list of nodes not in the config to distribute to
1993 28eddce5 Guido Trotter

1994 28eddce5 Guido Trotter
  """
1995 28eddce5 Guido Trotter
  # 1. Gather target nodes
1996 28eddce5 Guido Trotter
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
1997 28eddce5 Guido Trotter
  dist_nodes = lu.cfg.GetNodeList()
1998 28eddce5 Guido Trotter
  if additional_nodes is not None:
1999 28eddce5 Guido Trotter
    dist_nodes.extend(additional_nodes)
2000 28eddce5 Guido Trotter
  if myself.name in dist_nodes:
2001 28eddce5 Guido Trotter
    dist_nodes.remove(myself.name)
2002 28eddce5 Guido Trotter
  # 2. Gather files to distribute
2003 28eddce5 Guido Trotter
  dist_files = set([constants.ETC_HOSTS,
2004 28eddce5 Guido Trotter
                    constants.SSH_KNOWN_HOSTS_FILE,
2005 28eddce5 Guido Trotter
                    constants.RAPI_CERT_FILE,
2006 28eddce5 Guido Trotter
                    constants.RAPI_USERS_FILE,
2007 4a34c5cf Guido Trotter
                    constants.HMAC_CLUSTER_KEY,
2008 28eddce5 Guido Trotter
                   ])
2009 e1b8653f Guido Trotter
2010 e1b8653f Guido Trotter
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
2011 e1b8653f Guido Trotter
  for hv_name in enabled_hypervisors:
2012 e1b8653f Guido Trotter
    hv_class = hypervisor.GetHypervisor(hv_name)
2013 e1b8653f Guido Trotter
    dist_files.update(hv_class.GetAncillaryFiles())
2014 e1b8653f Guido Trotter
2015 28eddce5 Guido Trotter
  # 3. Perform the files upload
2016 28eddce5 Guido Trotter
  for fname in dist_files:
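    # some of these files are optional (for example the RAPI users file), so
    # only the ones actually present on the master node are uploaded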
2017 28eddce5 Guido Trotter
    if os.path.exists(fname):
2018 28eddce5 Guido Trotter
      result = lu.rpc.call_upload_file(dist_nodes, fname)
2019 28eddce5 Guido Trotter
      for to_node, to_result in result.items():
2020 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2021 6f7d4e75 Iustin Pop
        if msg:
2022 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2023 6f7d4e75 Iustin Pop
                 (fname, to_node, msg))
2024 6f7d4e75 Iustin Pop
          lu.proc.LogWarning(msg)
2025 28eddce5 Guido Trotter
2026 28eddce5 Guido Trotter
2027 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
2028 afee0879 Iustin Pop
  """Force the redistribution of cluster configuration.
2029 afee0879 Iustin Pop

2030 afee0879 Iustin Pop
  This is a very simple LU.
2031 afee0879 Iustin Pop

2032 afee0879 Iustin Pop
  """
2033 afee0879 Iustin Pop
  _OP_REQP = []
2034 afee0879 Iustin Pop
  REQ_BGL = False
2035 afee0879 Iustin Pop
2036 afee0879 Iustin Pop
  def ExpandNames(self):
2037 afee0879 Iustin Pop
    self.needed_locks = {
2038 afee0879 Iustin Pop
      locking.LEVEL_NODE: locking.ALL_SET,
2039 afee0879 Iustin Pop
    }
2040 afee0879 Iustin Pop
    self.share_locks[locking.LEVEL_NODE] = 1
2041 afee0879 Iustin Pop
2042 afee0879 Iustin Pop
  def CheckPrereq(self):
2043 afee0879 Iustin Pop
    """Check prerequisites.
2044 afee0879 Iustin Pop

2045 afee0879 Iustin Pop
    """
2046 afee0879 Iustin Pop
2047 afee0879 Iustin Pop
  def Exec(self, feedback_fn):
2048 afee0879 Iustin Pop
    """Redistribute the configuration.
2049 afee0879 Iustin Pop

2050 afee0879 Iustin Pop
    """
2051 afee0879 Iustin Pop
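    # saving the (unchanged) cluster object triggers a configuration write;
    # ConfigWriter then distributes the config and ssconf files for us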
    self.cfg.Update(self.cfg.GetClusterInfo())
2052 28eddce5 Guido Trotter
    _RedistributeAncillaryFiles(self)
2053 afee0879 Iustin Pop
2054 afee0879 Iustin Pop
2055 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
2056 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
2057 a8083063 Iustin Pop

2058 a8083063 Iustin Pop
  """
2059 a8083063 Iustin Pop
  if not instance.disks:
2060 a8083063 Iustin Pop
    return True
2061 a8083063 Iustin Pop
2062 a8083063 Iustin Pop
  if not oneshot:
2063 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
2064 a8083063 Iustin Pop
2065 a8083063 Iustin Pop
  node = instance.primary_node
2066 a8083063 Iustin Pop
2067 a8083063 Iustin Pop
  for dev in instance.disks:
2068 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
2069 a8083063 Iustin Pop
2070 a8083063 Iustin Pop
  retries = 0
2071 fbafd7a8 Iustin Pop
  degr_retries = 10 # in seconds, as we sleep 1 second each time
2072 a8083063 Iustin Pop
  while True:
2073 a8083063 Iustin Pop
    max_time = 0
2074 a8083063 Iustin Pop
    done = True
2075 a8083063 Iustin Pop
    cumul_degraded = False
2076 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
2077 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2078 3efa9051 Iustin Pop
    if msg:
2079 3efa9051 Iustin Pop
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
2080 a8083063 Iustin Pop
      retries += 1
2081 a8083063 Iustin Pop
      if retries >= 10:
2082 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
2083 3ecf6786 Iustin Pop
                                 " aborting." % node)
2084 a8083063 Iustin Pop
      time.sleep(6)
2085 a8083063 Iustin Pop
      continue
2086 3efa9051 Iustin Pop
    rstats = rstats.payload
2087 a8083063 Iustin Pop
    retries = 0
2088 1492cca7 Iustin Pop
    for i, mstat in enumerate(rstats):
2089 a8083063 Iustin Pop
      if mstat is None:
2090 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
2091 86d9d3bb Iustin Pop
                      node, instance.disks[i].iv_name)
2092 a8083063 Iustin Pop
        continue
2093 36145b12 Michael Hanselmann
2094 36145b12 Michael Hanselmann
      cumul_degraded = (cumul_degraded or
2095 36145b12 Michael Hanselmann
                        (mstat.is_degraded and mstat.sync_percent is None))
2096 36145b12 Michael Hanselmann
      if mstat.sync_percent is not None:
2097 a8083063 Iustin Pop
        done = False
2098 36145b12 Michael Hanselmann
        if mstat.estimated_time is not None:
2099 36145b12 Michael Hanselmann
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
2100 36145b12 Michael Hanselmann
          max_time = mstat.estimated_time
2101 a8083063 Iustin Pop
        else:
2102 a8083063 Iustin Pop
          rem_time = "no time estimate"
2103 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
2104 4d4a651d Michael Hanselmann
                        (instance.disks[i].iv_name, mstat.sync_percent,
2105 4d4a651d Michael Hanselmann
                         rem_time))
2106 fbafd7a8 Iustin Pop
2107 fbafd7a8 Iustin Pop
    # if we're done but degraded, let's do a few small retries, to
2108 fbafd7a8 Iustin Pop
    # make sure we see a stable and not transient situation; therefore
2109 fbafd7a8 Iustin Pop
    # we force restart of the loop
2110 fbafd7a8 Iustin Pop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
2111 fbafd7a8 Iustin Pop
      logging.info("Degraded disks found, %d retries left", degr_retries)
2112 fbafd7a8 Iustin Pop
      degr_retries -= 1
2113 fbafd7a8 Iustin Pop
      time.sleep(1)
2114 fbafd7a8 Iustin Pop
      continue
2115 fbafd7a8 Iustin Pop
2116 a8083063 Iustin Pop
    if done or oneshot:
2117 a8083063 Iustin Pop
      break
2118 a8083063 Iustin Pop
2119 d4fa5c23 Iustin Pop
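    # wait before polling again, but never more than a minute even if the
    # estimated remaining sync time is longer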
    time.sleep(min(60, max_time))
2120 a8083063 Iustin Pop
2121 a8083063 Iustin Pop
  if done:
2122 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
2123 a8083063 Iustin Pop
  return not cumul_degraded
2124 a8083063 Iustin Pop
2125 a8083063 Iustin Pop
2126 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
2127 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
2128 a8083063 Iustin Pop

2129 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
2130 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
2131 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
2132 0834c866 Iustin Pop

2133 a8083063 Iustin Pop
  """
2134 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
2135 a8083063 Iustin Pop
2136 a8083063 Iustin Pop
  result = True
2137 96acbc09 Michael Hanselmann
2138 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
2139 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
2140 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2141 23829f6f Iustin Pop
    if msg:
2142 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
2143 23829f6f Iustin Pop
      result = False
2144 23829f6f Iustin Pop
    elif not rstats.payload:
2145 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
2146 a8083063 Iustin Pop
      result = False
2147 a8083063 Iustin Pop
    else:
2148 96acbc09 Michael Hanselmann
      if ldisk:
2149 f208978a Michael Hanselmann
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
2150 96acbc09 Michael Hanselmann
      else:
2151 96acbc09 Michael Hanselmann
        result = result and not rstats.payload.is_degraded
2152 96acbc09 Michael Hanselmann
2153 a8083063 Iustin Pop
  if dev.children:
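    # also check any child devices; note that the ldisk test is not passed
    # down to them, they are checked with the default is_degraded test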
2154 a8083063 Iustin Pop
    for child in dev.children:
2155 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
2156 a8083063 Iustin Pop
2157 a8083063 Iustin Pop
  return result
2158 a8083063 Iustin Pop
2159 a8083063 Iustin Pop
2160 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
2161 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
2162 a8083063 Iustin Pop

2163 a8083063 Iustin Pop
  """
2164 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2165 6bf01bbb Guido Trotter
  REQ_BGL = False
2166 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
2167 1e288a26 Guido Trotter
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
2168 1e288a26 Guido Trotter
  # Fields that need calculation of global os validity
2169 1e288a26 Guido Trotter
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])
2170 a8083063 Iustin Pop
2171 6bf01bbb Guido Trotter
  def ExpandNames(self):
2172 1f9430d6 Iustin Pop
    if self.op.names:
2173 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
2174 1f9430d6 Iustin Pop
2175 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2176 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2177 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
2178 1f9430d6 Iustin Pop
2179 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
2180 a6ab004b Iustin Pop
    # Temporary removal of locks, should be reverted later
2181 a6ab004b Iustin Pop
    # TODO: reintroduce locks when they are lighter-weight
2182 6bf01bbb Guido Trotter
    self.needed_locks = {}
2183 a6ab004b Iustin Pop
    #self.share_locks[locking.LEVEL_NODE] = 1
2184 a6ab004b Iustin Pop
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2185 6bf01bbb Guido Trotter
2186 6bf01bbb Guido Trotter
  def CheckPrereq(self):
2187 6bf01bbb Guido Trotter
    """Check prerequisites.
2188 6bf01bbb Guido Trotter

2189 6bf01bbb Guido Trotter
    """
2190 6bf01bbb Guido Trotter
2191 1f9430d6 Iustin Pop
  @staticmethod
2192 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
2193 1f9430d6 Iustin Pop
    """Remaps a per-node return list into a per-os per-node dictionary
2194 1f9430d6 Iustin Pop

2195 e4376078 Iustin Pop
    @param node_list: a list with the names of all nodes
2196 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
2197 1f9430d6 Iustin Pop

2198 e4376078 Iustin Pop
    @rtype: dict
2199 5fcc718f Iustin Pop
    @return: a dictionary with osnames as keys and as value another map, with
2200 255dcebd Iustin Pop
        nodes as keys and tuples of (path, status, diagnose, variants)
        as values, eg::
2201 e4376078 Iustin Pop

2202 255dcebd Iustin Pop
          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
2203 255dcebd Iustin Pop
                                     (/srv/..., False, "invalid api")],
2204 255dcebd Iustin Pop
                           "node2": [(/srv/..., True, "")]}
2205 e4376078 Iustin Pop
          }
2206 1f9430d6 Iustin Pop

2207 1f9430d6 Iustin Pop
    """
2208 1f9430d6 Iustin Pop
    all_os = {}
2209 a6ab004b Iustin Pop
    # we build here the list of nodes that didn't fail the RPC (at RPC
2210 a6ab004b Iustin Pop
    # level), so that nodes with a non-responding node daemon don't
2211 a6ab004b Iustin Pop
    # make all OSes invalid
2212 a6ab004b Iustin Pop
    good_nodes = [node_name for node_name in rlist
2213 4c4e4e1e Iustin Pop
                  if not rlist[node_name].fail_msg]
2214 83d92ad8 Iustin Pop
    for node_name, nr in rlist.items():
2215 4c4e4e1e Iustin Pop
      if nr.fail_msg or not nr.payload:
2216 1f9430d6 Iustin Pop
        continue
2217 ba00557a Guido Trotter
      for name, path, status, diagnose, variants in nr.payload:
2218 255dcebd Iustin Pop
        if name not in all_os:
2219 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
2220 1f9430d6 Iustin Pop
          # for each node in node_list
2221 255dcebd Iustin Pop
          all_os[name] = {}
2222 a6ab004b Iustin Pop
          for nname in good_nodes:
2223 255dcebd Iustin Pop
            all_os[name][nname] = []
2224 ba00557a Guido Trotter
        all_os[name][node_name].append((path, status, diagnose, variants))
2225 1f9430d6 Iustin Pop
    return all_os
2226 a8083063 Iustin Pop
2227 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2228 a8083063 Iustin Pop
    """Compute the list of OSes.
2229 a8083063 Iustin Pop

2230 a8083063 Iustin Pop
    """
2231 a6ab004b Iustin Pop
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
2232 94a02bb5 Iustin Pop
    node_data = self.rpc.call_os_diagnose(valid_nodes)
2233 94a02bb5 Iustin Pop
    pol = self._DiagnoseByOS(valid_nodes, node_data)
2234 1f9430d6 Iustin Pop
    output = []
2235 1e288a26 Guido Trotter
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
2236 1e288a26 Guido Trotter
    calc_variants = "variants" in self.op.output_fields
2237 1e288a26 Guido Trotter
2238 83d92ad8 Iustin Pop
    for os_name, os_data in pol.items():
2239 1f9430d6 Iustin Pop
      row = []
2240 1e288a26 Guido Trotter
      if calc_valid:
2241 1e288a26 Guido Trotter
        valid = True
2242 1e288a26 Guido Trotter
        variants = None
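        # an OS is reported as valid only if it is valid on every node; the
        # variants shown are the intersection of the per-node variant lists
        # (e.g. ["a", "b"] on one node and ["b", "c"] on another yield ["b"])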
2243 1e288a26 Guido Trotter
        for osl in os_data.values():
2244 1e288a26 Guido Trotter
          valid = valid and osl and osl[0][1]
2245 1e288a26 Guido Trotter
          if not valid:
2246 1e288a26 Guido Trotter
            variants = None
2247 1e288a26 Guido Trotter
            break
2248 1e288a26 Guido Trotter
          if calc_variants:
2249 1e288a26 Guido Trotter
            node_variants = osl[0][3]
2250 1e288a26 Guido Trotter
            if variants is None:
2251 1e288a26 Guido Trotter
              variants = node_variants
2252 1e288a26 Guido Trotter
            else:
2253 1e288a26 Guido Trotter
              variants = [v for v in variants if v in node_variants]
2254 1e288a26 Guido Trotter
2255 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
2256 1f9430d6 Iustin Pop
        if field == "name":
2257 1f9430d6 Iustin Pop
          val = os_name
2258 1f9430d6 Iustin Pop
        elif field == "valid":
2259 1e288a26 Guido Trotter
          val = valid
2260 1f9430d6 Iustin Pop
        elif field == "node_status":
2261 255dcebd Iustin Pop
          # this is just a copy of the dict
2262 1f9430d6 Iustin Pop
          val = {}
2263 255dcebd Iustin Pop
          for node_name, nos_list in os_data.items():
2264 255dcebd Iustin Pop
            val[node_name] = nos_list
2265 1e288a26 Guido Trotter
        elif field == "variants":
2266 1e288a26 Guido Trotter
          val = variants
2267 1f9430d6 Iustin Pop
        else:
2268 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
2269 1f9430d6 Iustin Pop
        row.append(val)
2270 1f9430d6 Iustin Pop
      output.append(row)
2271 1f9430d6 Iustin Pop
2272 1f9430d6 Iustin Pop
    return output
2273 a8083063 Iustin Pop
2274 a8083063 Iustin Pop
2275 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
2276 a8083063 Iustin Pop
  """Logical unit for removing a node.
2277 a8083063 Iustin Pop

2278 a8083063 Iustin Pop
  """
2279 a8083063 Iustin Pop
  HPATH = "node-remove"
2280 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2281 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2282 a8083063 Iustin Pop
2283 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2284 a8083063 Iustin Pop
    """Build hooks env.
2285 a8083063 Iustin Pop

2286 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
2287 d08869ee Guido Trotter
    node would then be impossible to remove.
2288 a8083063 Iustin Pop

2289 a8083063 Iustin Pop
    """
2290 396e1b78 Michael Hanselmann
    env = {
2291 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2292 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
2293 396e1b78 Michael Hanselmann
      }
2294 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2295 cd46f3b4 Luca Bigliardi
    if self.op.node_name in all_nodes:
2296 cd46f3b4 Luca Bigliardi
      all_nodes.remove(self.op.node_name)
2297 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
2298 a8083063 Iustin Pop
2299 a8083063 Iustin Pop
  def CheckPrereq(self):
2300 a8083063 Iustin Pop
    """Check prerequisites.
2301 a8083063 Iustin Pop

2302 a8083063 Iustin Pop
    This checks:
2303 a8083063 Iustin Pop
     - the node exists in the configuration
2304 a8083063 Iustin Pop
     - it does not have primary or secondary instances
2305 a8083063 Iustin Pop
     - it's not the master
2306 a8083063 Iustin Pop

2307 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2308 a8083063 Iustin Pop

2309 a8083063 Iustin Pop
    """
2310 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
2311 a8083063 Iustin Pop
    if node is None:
2312 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
2313 a8083063 Iustin Pop
2314 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2315 a8083063 Iustin Pop
2316 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
2317 a8083063 Iustin Pop
    if node.name == masternode:
2318 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
2319 3ecf6786 Iustin Pop
                                 " you need to failover first.")
2320 a8083063 Iustin Pop
2321 a8083063 Iustin Pop
    for instance_name in instance_list:
2322 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
2323 6b12959c Iustin Pop
      if node.name in instance.all_nodes:
2324 6b12959c Iustin Pop
        raise errors.OpPrereqError("Instance %s is still running on the node,"
2325 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
2326 a8083063 Iustin Pop
    self.op.node_name = node.name
2327 a8083063 Iustin Pop
    self.node = node
2328 a8083063 Iustin Pop
2329 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2330 a8083063 Iustin Pop
    """Removes the node from the cluster.
2331 a8083063 Iustin Pop

2332 a8083063 Iustin Pop
    """
2333 a8083063 Iustin Pop
    node = self.node
2334 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
2335 9a4f63d1 Iustin Pop
                 node.name)
2336 a8083063 Iustin Pop
2337 44485f49 Guido Trotter
    # Promote nodes to master candidate as needed
2338 44485f49 Guido Trotter
    _AdjustCandidatePool(self, exceptions=[node.name])
2339 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
2340 a8083063 Iustin Pop
2341 cd46f3b4 Luca Bigliardi
    # Run post hooks on the node before it's removed
2342 cd46f3b4 Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
2343 cd46f3b4 Luca Bigliardi
    try:
2344 cd46f3b4 Luca Bigliardi
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
2345 3cb5c1e3 Luca Bigliardi
    except:
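      # failures while running the hooks must not abort the node removal
      # itself, so they are only logged as warnings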
2346 3cb5c1e3 Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % node.name)
2347 cd46f3b4 Luca Bigliardi
2348 0623d351 Iustin Pop
    result = self.rpc.call_node_leave_cluster(node.name)
2349 4c4e4e1e Iustin Pop
    msg = result.fail_msg
2350 0623d351 Iustin Pop
    if msg:
2351 0623d351 Iustin Pop
      self.LogWarning("Errors encountered on the remote node while leaving"
2352 0623d351 Iustin Pop
                      " the cluster: %s", msg)
2353 c8a0948f Michael Hanselmann
2354 a8083063 Iustin Pop
2355 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
2356 a8083063 Iustin Pop
  """Logical unit for querying nodes.
2357 a8083063 Iustin Pop

2358 a8083063 Iustin Pop
  """
2359 bc8e4a1a Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
2360 35705d8f Guido Trotter
  REQ_BGL = False
2361 19bed813 Iustin Pop
2362 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
2363 19bed813 Iustin Pop
                    "master_candidate", "offline", "drained"]
2364 19bed813 Iustin Pop
2365 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
2366 31bf511f Iustin Pop
    "dtotal", "dfree",
2367 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
2368 31bf511f Iustin Pop
    "bootid",
2369 0105bad3 Iustin Pop
    "ctotal", "cnodes", "csockets",
2370 31bf511f Iustin Pop
    )
2371 31bf511f Iustin Pop
2372 19bed813 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*[
2373 19bed813 Iustin Pop
    "pinst_cnt", "sinst_cnt",
2374 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
2375 31bf511f Iustin Pop
    "pip", "sip", "tags",
2376 0e67cdbe Iustin Pop
    "master",
2377 19bed813 Iustin Pop
    "role"] + _SIMPLE_FIELDS
2378 31bf511f Iustin Pop
    )
2379 a8083063 Iustin Pop
2380 35705d8f Guido Trotter
  def ExpandNames(self):
2381 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2382 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2383 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2384 a8083063 Iustin Pop
2385 35705d8f Guido Trotter
    self.needed_locks = {}
2386 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2387 c8d8b4c8 Iustin Pop
2388 c8d8b4c8 Iustin Pop
    if self.op.names:
2389 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
2390 35705d8f Guido Trotter
    else:
2391 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
2392 c8d8b4c8 Iustin Pop
2393 bc8e4a1a Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2394 bc8e4a1a Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
2395 c8d8b4c8 Iustin Pop
    if self.do_locking:
2396 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
2397 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
2398 c8d8b4c8 Iustin Pop
2399 35705d8f Guido Trotter
2400 35705d8f Guido Trotter
  def CheckPrereq(self):
2401 35705d8f Guido Trotter
    """Check prerequisites.
2402 35705d8f Guido Trotter

2403 35705d8f Guido Trotter
    """
2404 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in the _GetWantedNodes,
2405 c8d8b4c8 Iustin Pop
    # if non empty, and if empty, there's no validation to do
2406 c8d8b4c8 Iustin Pop
    pass
2407 a8083063 Iustin Pop
2408 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2409 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2410 a8083063 Iustin Pop

2411 a8083063 Iustin Pop
    """
2412 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
2413 c8d8b4c8 Iustin Pop
    if self.do_locking:
2414 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
2415 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2416 3fa93523 Guido Trotter
      nodenames = self.wanted
2417 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
2418 3fa93523 Guido Trotter
      if missing:
2419 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2420 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
2421 c8d8b4c8 Iustin Pop
    else:
2422 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
2423 c1f1cbb2 Iustin Pop
2424 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
2425 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
2426 a8083063 Iustin Pop
2427 a8083063 Iustin Pop
    # begin data gathering
2428 a8083063 Iustin Pop
2429 bc8e4a1a Iustin Pop
    if self.do_node_query:
2430 a8083063 Iustin Pop
      live_data = {}
2431 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2432 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
2433 a8083063 Iustin Pop
      for name in nodenames:
2434 781de953 Iustin Pop
        nodeinfo = node_data[name]
2435 4c4e4e1e Iustin Pop
        if not nodeinfo.fail_msg and nodeinfo.payload:
2436 070e998b Iustin Pop
          nodeinfo = nodeinfo.payload
2437 d599d686 Iustin Pop
          fn = utils.TryConvert
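          # convert the numeric fields to int where possible; TryConvert
          # leaves the original value untouched if the conversion fails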
2438 a8083063 Iustin Pop
          live_data[name] = {
2439 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2440 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2441 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2442 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2443 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2444 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2445 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
2446 0105bad3 Iustin Pop
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2447 0105bad3 Iustin Pop
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2448 a8083063 Iustin Pop
            }
2449 a8083063 Iustin Pop
        else:
2450 a8083063 Iustin Pop
          live_data[name] = {}
2451 a8083063 Iustin Pop
    else:
2452 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
2453 a8083063 Iustin Pop
2454 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
2455 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
2456 a8083063 Iustin Pop
2457 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2458 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
2459 ec223efb Iustin Pop
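    # the node-to-instance mappings are only needed (and computed) if one of
    # the instance-related fields was actually requested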
    if inst_fields & frozenset(self.op.output_fields):
2460 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
2461 a8083063 Iustin Pop
2462 ec223efb Iustin Pop
      for instance_name in instancelist:
2463 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
2464 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
2465 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
2466 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
2467 ec223efb Iustin Pop
          if secnode in node_to_secondary:
2468 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
2469 a8083063 Iustin Pop
2470 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
2471 0e67cdbe Iustin Pop
2472 a8083063 Iustin Pop
    # end data gathering
2473 a8083063 Iustin Pop
2474 a8083063 Iustin Pop
    output = []
2475 a8083063 Iustin Pop
    for node in nodelist:
2476 a8083063 Iustin Pop
      node_output = []
2477 a8083063 Iustin Pop
      for field in self.op.output_fields:
2478 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
2479 19bed813 Iustin Pop
          val = getattr(node, field)
2480 ec223efb Iustin Pop
        elif field == "pinst_list":
2481 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
2482 ec223efb Iustin Pop
        elif field == "sinst_list":
2483 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
2484 ec223efb Iustin Pop
        elif field == "pinst_cnt":
2485 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
2486 ec223efb Iustin Pop
        elif field == "sinst_cnt":
2487 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
2488 a8083063 Iustin Pop
        elif field == "pip":
2489 a8083063 Iustin Pop
          val = node.primary_ip
2490 a8083063 Iustin Pop
        elif field == "sip":
2491 a8083063 Iustin Pop
          val = node.secondary_ip
2492 130a6a6f Iustin Pop
        elif field == "tags":
2493 130a6a6f Iustin Pop
          val = list(node.GetTags())
2494 0e67cdbe Iustin Pop
        elif field == "master":
2495 0e67cdbe Iustin Pop
          val = node.name == master_node
2496 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
2497 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
2498 c120ff34 Iustin Pop
        elif field == "role":
2499 c120ff34 Iustin Pop
          if node.name == master_node:
2500 c120ff34 Iustin Pop
            val = "M"
2501 c120ff34 Iustin Pop
          elif node.master_candidate:
2502 c120ff34 Iustin Pop
            val = "C"
2503 c120ff34 Iustin Pop
          elif node.drained:
2504 c120ff34 Iustin Pop
            val = "D"
2505 c120ff34 Iustin Pop
          elif node.offline:
2506 c120ff34 Iustin Pop
            val = "O"
2507 c120ff34 Iustin Pop
          else:
2508 c120ff34 Iustin Pop
            val = "R"
2509 a8083063 Iustin Pop
        else:
2510 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2511 a8083063 Iustin Pop
        node_output.append(val)
2512 a8083063 Iustin Pop
      output.append(node_output)
2513 a8083063 Iustin Pop
2514 a8083063 Iustin Pop
    return output
2515 a8083063 Iustin Pop
2516 a8083063 Iustin Pop
2517 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
2518 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
2519 dcb93971 Michael Hanselmann

2520 dcb93971 Michael Hanselmann
  """
2521 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
2522 21a15682 Guido Trotter
  REQ_BGL = False
2523 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
2524 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
2525 21a15682 Guido Trotter
2526 21a15682 Guido Trotter
  def ExpandNames(self):
2527 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2528 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2529 21a15682 Guido Trotter
                       selected=self.op.output_fields)
2530 21a15682 Guido Trotter
2531 21a15682 Guido Trotter
    self.needed_locks = {}
2532 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2533 21a15682 Guido Trotter
    if not self.op.nodes:
2534 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2535 21a15682 Guido Trotter
    else:
2536 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
2537 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
2538 dcb93971 Michael Hanselmann
2539 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
2540 dcb93971 Michael Hanselmann
    """Check prerequisites.
2541 dcb93971 Michael Hanselmann

2542 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
2543 dcb93971 Michael Hanselmann

2544 dcb93971 Michael Hanselmann
    """
2545 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2546 dcb93971 Michael Hanselmann
2547 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
2548 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
2549 dcb93971 Michael Hanselmann

2550 dcb93971 Michael Hanselmann
    """
2551 a7ba5e53 Iustin Pop
    nodenames = self.nodes
2552 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
2553 dcb93971 Michael Hanselmann
2554 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
2555 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
2556 dcb93971 Michael Hanselmann
2557 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
2558 dcb93971 Michael Hanselmann
2559 dcb93971 Michael Hanselmann
    output = []
2560 dcb93971 Michael Hanselmann
    for node in nodenames:
2561 10bfe6cb Iustin Pop
      nresult = volumes[node]
2562 10bfe6cb Iustin Pop
      if nresult.offline:
2563 10bfe6cb Iustin Pop
        continue
2564 4c4e4e1e Iustin Pop
      msg = nresult.fail_msg
2565 10bfe6cb Iustin Pop
      if msg:
2566 10bfe6cb Iustin Pop
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
2567 37d19eb2 Michael Hanselmann
        continue
2568 37d19eb2 Michael Hanselmann
2569 10bfe6cb Iustin Pop
      node_vols = nresult.payload[:]
2570 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
2571 dcb93971 Michael Hanselmann
2572 dcb93971 Michael Hanselmann
      for vol in node_vols:
2573 dcb93971 Michael Hanselmann
        node_output = []
2574 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
2575 dcb93971 Michael Hanselmann
          if field == "node":
2576 dcb93971 Michael Hanselmann
            val = node
2577 dcb93971 Michael Hanselmann
          elif field == "phys":
2578 dcb93971 Michael Hanselmann
            val = vol['dev']
2579 dcb93971 Michael Hanselmann
          elif field == "vg":
2580 dcb93971 Michael Hanselmann
            val = vol['vg']
2581 dcb93971 Michael Hanselmann
          elif field == "name":
2582 dcb93971 Michael Hanselmann
            val = vol['name']
2583 dcb93971 Michael Hanselmann
          elif field == "size":
2584 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
2585 dcb93971 Michael Hanselmann
          elif field == "instance":
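            # look up the instance owning this LV; the for/else construct
            # falls through to '-' when no instance uses it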
2586 dcb93971 Michael Hanselmann
            for inst in ilist:
2587 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
2588 dcb93971 Michael Hanselmann
                continue
2589 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
2590 dcb93971 Michael Hanselmann
                val = inst.name
2591 dcb93971 Michael Hanselmann
                break
2592 dcb93971 Michael Hanselmann
            else:
2593 dcb93971 Michael Hanselmann
              val = '-'
2594 dcb93971 Michael Hanselmann
          else:
2595 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
2596 dcb93971 Michael Hanselmann
          node_output.append(str(val))
2597 dcb93971 Michael Hanselmann
2598 dcb93971 Michael Hanselmann
        output.append(node_output)
2599 dcb93971 Michael Hanselmann
2600 dcb93971 Michael Hanselmann
    return output
2601 dcb93971 Michael Hanselmann
2602 dcb93971 Michael Hanselmann
2603 9e5442ce Michael Hanselmann
class LUQueryNodeStorage(NoHooksLU):
2604 9e5442ce Michael Hanselmann
  """Logical unit for getting information on storage units on node(s).
2605 9e5442ce Michael Hanselmann

2606 9e5442ce Michael Hanselmann
  """
2607 9e5442ce Michael Hanselmann
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
2608 9e5442ce Michael Hanselmann
  REQ_BGL = False
2609 9e5442ce Michael Hanselmann
  _FIELDS_STATIC = utils.FieldSet("node")
2610 9e5442ce Michael Hanselmann
2611 9e5442ce Michael Hanselmann
  def ExpandNames(self):
2612 9e5442ce Michael Hanselmann
    storage_type = self.op.storage_type
2613 9e5442ce Michael Hanselmann
2614 9e5442ce Michael Hanselmann
    if storage_type not in constants.VALID_STORAGE_FIELDS:
2615 9e5442ce Michael Hanselmann
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type)
2616 9e5442ce Michael Hanselmann
2617 9e5442ce Michael Hanselmann
    dynamic_fields = constants.VALID_STORAGE_FIELDS[storage_type]
2618 9e5442ce Michael Hanselmann
2619 9e5442ce Michael Hanselmann
    _CheckOutputFields(static=self._FIELDS_STATIC,
2620 9e5442ce Michael Hanselmann
                       dynamic=utils.FieldSet(*dynamic_fields),
2621 9e5442ce Michael Hanselmann
                       selected=self.op.output_fields)
2622 9e5442ce Michael Hanselmann
2623 9e5442ce Michael Hanselmann
    self.needed_locks = {}
2624 9e5442ce Michael Hanselmann
    self.share_locks[locking.LEVEL_NODE] = 1
2625 9e5442ce Michael Hanselmann
2626 9e5442ce Michael Hanselmann
    if self.op.nodes:
2627 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = \
2628 9e5442ce Michael Hanselmann
        _GetWantedNodes(self, self.op.nodes)
2629 9e5442ce Michael Hanselmann
    else:
2630 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2631 9e5442ce Michael Hanselmann
2632 9e5442ce Michael Hanselmann
  def CheckPrereq(self):
2633 9e5442ce Michael Hanselmann
    """Check prerequisites.
2634 9e5442ce Michael Hanselmann

2635 9e5442ce Michael Hanselmann
    This checks that the fields required are valid output fields.
2636 9e5442ce Michael Hanselmann

2637 9e5442ce Michael Hanselmann
    """
2638 9e5442ce Michael Hanselmann
    self.op.name = getattr(self.op, "name", None)
2639 9e5442ce Michael Hanselmann
2640 9e5442ce Michael Hanselmann
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2641 9e5442ce Michael Hanselmann
2642 9e5442ce Michael Hanselmann
  def Exec(self, feedback_fn):
2643 9e5442ce Michael Hanselmann
    """Computes the list of nodes and their attributes.
2644 9e5442ce Michael Hanselmann

2645 9e5442ce Michael Hanselmann
    """
2646 9e5442ce Michael Hanselmann
    # Always get name to sort by
2647 9e5442ce Michael Hanselmann
    if constants.SF_NAME in self.op.output_fields:
2648 9e5442ce Michael Hanselmann
      fields = self.op.output_fields[:]
2649 9e5442ce Michael Hanselmann
    else:
2650 9e5442ce Michael Hanselmann
      fields = [constants.SF_NAME] + self.op.output_fields
2651 9e5442ce Michael Hanselmann
2652 9e5442ce Michael Hanselmann
    # Never ask for node as it's only known to the LU
2653 9e5442ce Michael Hanselmann
    while "node" in fields:
2654 9e5442ce Michael Hanselmann
      fields.remove("node")
2655 9e5442ce Michael Hanselmann
2656 9e5442ce Michael Hanselmann
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
2657 9e5442ce Michael Hanselmann
    name_idx = field_idx[constants.SF_NAME]
2658 9e5442ce Michael Hanselmann
2659 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
2660 9e5442ce Michael Hanselmann
    data = self.rpc.call_storage_list(self.nodes,
2661 9e5442ce Michael Hanselmann
                                      self.op.storage_type, st_args,
2662 9e5442ce Michael Hanselmann
                                      self.op.name, fields)
2663 9e5442ce Michael Hanselmann
2664 9e5442ce Michael Hanselmann
    result = []
2665 9e5442ce Michael Hanselmann
2666 9e5442ce Michael Hanselmann
    for node in utils.NiceSort(self.nodes):
2667 9e5442ce Michael Hanselmann
      nresult = data[node]
2668 9e5442ce Michael Hanselmann
      if nresult.offline:
2669 9e5442ce Michael Hanselmann
        continue
2670 9e5442ce Michael Hanselmann
2671 9e5442ce Michael Hanselmann
      msg = nresult.fail_msg
2672 9e5442ce Michael Hanselmann
      if msg:
2673 9e5442ce Michael Hanselmann
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
2674 9e5442ce Michael Hanselmann
        continue
2675 9e5442ce Michael Hanselmann
2676 9e5442ce Michael Hanselmann
      rows = dict([(row[name_idx], row) for row in nresult.payload])
2677 9e5442ce Michael Hanselmann
2678 9e5442ce Michael Hanselmann
      for name in utils.NiceSort(rows.keys()):
2679 9e5442ce Michael Hanselmann
        row = rows[name]
2680 9e5442ce Michael Hanselmann
2681 9e5442ce Michael Hanselmann
        out = []
2682 9e5442ce Michael Hanselmann
2683 9e5442ce Michael Hanselmann
        for field in self.op.output_fields:
2684 9e5442ce Michael Hanselmann
          if field == "node":
2685 9e5442ce Michael Hanselmann
            val = node
2686 9e5442ce Michael Hanselmann
          elif field in field_idx:
2687 9e5442ce Michael Hanselmann
            val = row[field_idx[field]]
2688 9e5442ce Michael Hanselmann
          else:
2689 9e5442ce Michael Hanselmann
            raise errors.ParameterError(field)
2690 9e5442ce Michael Hanselmann
2691 9e5442ce Michael Hanselmann
          out.append(val)
2692 9e5442ce Michael Hanselmann
2693 9e5442ce Michael Hanselmann
        result.append(out)
2694 9e5442ce Michael Hanselmann
2695 9e5442ce Michael Hanselmann
    return result
2696 9e5442ce Michael Hanselmann
2697 9e5442ce Michael Hanselmann
2698 efb8da02 Michael Hanselmann
class LUModifyNodeStorage(NoHooksLU):
2699 efb8da02 Michael Hanselmann
  """Logical unit for modifying a storage volume on a node.
2700 efb8da02 Michael Hanselmann

2701 efb8da02 Michael Hanselmann
  """
2702 efb8da02 Michael Hanselmann
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
2703 efb8da02 Michael Hanselmann
  REQ_BGL = False
2704 efb8da02 Michael Hanselmann
2705 efb8da02 Michael Hanselmann
  def CheckArguments(self):
2706 efb8da02 Michael Hanselmann
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2707 efb8da02 Michael Hanselmann
    if node_name is None:
2708 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2709 efb8da02 Michael Hanselmann
2710 efb8da02 Michael Hanselmann
    self.op.node_name = node_name
2711 efb8da02 Michael Hanselmann
2712 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
2713 efb8da02 Michael Hanselmann
    if storage_type not in constants.VALID_STORAGE_FIELDS:
2714 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type)
2715 efb8da02 Michael Hanselmann
2716 efb8da02 Michael Hanselmann
  def ExpandNames(self):
2717 efb8da02 Michael Hanselmann
    self.needed_locks = {
2718 efb8da02 Michael Hanselmann
      locking.LEVEL_NODE: self.op.node_name,
2719 efb8da02 Michael Hanselmann
      }
2720 efb8da02 Michael Hanselmann
2721 efb8da02 Michael Hanselmann
  def CheckPrereq(self):
2722 efb8da02 Michael Hanselmann
    """Check prerequisites.
2723 efb8da02 Michael Hanselmann

2724 efb8da02 Michael Hanselmann
    """
2725 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
2726 efb8da02 Michael Hanselmann
2727 efb8da02 Michael Hanselmann
    try:
2728 efb8da02 Michael Hanselmann
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
2729 efb8da02 Michael Hanselmann
    except KeyError:
2730 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
2731 efb8da02 Michael Hanselmann
                                 " modified" % storage_type)
2732 efb8da02 Michael Hanselmann
2733 efb8da02 Michael Hanselmann
    diff = set(self.op.changes.keys()) - modifiable
2734 efb8da02 Michael Hanselmann
    if diff:
2735 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("The following fields can not be modified for"
2736 efb8da02 Michael Hanselmann
                                 " storage units of type '%s': %r" %
2737 efb8da02 Michael Hanselmann
                                 (storage_type, list(diff)))
2738 efb8da02 Michael Hanselmann
2739 efb8da02 Michael Hanselmann
  def Exec(self, feedback_fn):
2740 efb8da02 Michael Hanselmann
    """Modifies a storage volume on the node.
2741 efb8da02 Michael Hanselmann

2742 efb8da02 Michael Hanselmann
    """
2743 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
2744 efb8da02 Michael Hanselmann
    result = self.rpc.call_storage_modify(self.op.node_name,
2745 efb8da02 Michael Hanselmann
                                          self.op.storage_type, st_args,
2746 efb8da02 Michael Hanselmann
                                          self.op.name, self.op.changes)
2747 efb8da02 Michael Hanselmann
    result.Raise("Failed to modify storage unit '%s' on %s" %
2748 efb8da02 Michael Hanselmann
                 (self.op.name, self.op.node_name))
2749 efb8da02 Michael Hanselmann
2750 efb8da02 Michael Hanselmann
2751 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
2752 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
2753 a8083063 Iustin Pop

2754 a8083063 Iustin Pop
  """
2755 a8083063 Iustin Pop
  HPATH = "node-add"
2756 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2757 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2758 a8083063 Iustin Pop
2759 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2760 a8083063 Iustin Pop
    """Build hooks env.
2761 a8083063 Iustin Pop

2762 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
2763 a8083063 Iustin Pop

2764 a8083063 Iustin Pop
    """
2765 a8083063 Iustin Pop
    env = {
2766 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2767 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
2768 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
2769 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
2770 a8083063 Iustin Pop
      }
2771 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
2772 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
2773 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
2774 a8083063 Iustin Pop
2775 a8083063 Iustin Pop
  def CheckPrereq(self):
2776 a8083063 Iustin Pop
    """Check prerequisites.
2777 a8083063 Iustin Pop

2778 a8083063 Iustin Pop
    This checks:
2779 a8083063 Iustin Pop
     - the new node is not already in the config
2780 a8083063 Iustin Pop
     - it is resolvable
2781 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
2782 a8083063 Iustin Pop

2783 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2784 a8083063 Iustin Pop

2785 a8083063 Iustin Pop
    """
2786 a8083063 Iustin Pop
    node_name = self.op.node_name
2787 a8083063 Iustin Pop
    cfg = self.cfg
2788 a8083063 Iustin Pop
2789 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
2790 a8083063 Iustin Pop
2791 bcf043c9 Iustin Pop
    node = dns_data.name
2792 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
2793 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
2794 a8083063 Iustin Pop
    if secondary_ip is None:
2795 a8083063 Iustin Pop
      secondary_ip = primary_ip
2796 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
2797 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
2798 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
2799 e7c6e02b Michael Hanselmann
2800 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
2801 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
2802 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
2803 e7c6e02b Michael Hanselmann
                                 node)
2804 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
2805 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2806 a8083063 Iustin Pop
2807 a8083063 Iustin Pop
    for existing_node_name in node_list:
2808 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
2809 e7c6e02b Michael Hanselmann
2810 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
2811 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
2812 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
2813 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
2814 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
2815 e7c6e02b Michael Hanselmann
        continue
2816 e7c6e02b Michael Hanselmann
2817 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
2818 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
2819 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
2820 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
2821 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
2822 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
2823 a8083063 Iustin Pop
2824 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
2825 a8083063 Iustin Pop
    # same as for the master
2826 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2827 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
2828 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
2829 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
2830 a8083063 Iustin Pop
      if master_singlehomed:
2831 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
2832 3ecf6786 Iustin Pop
                                   " new node has one")
2833 a8083063 Iustin Pop
      else:
2834 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
2835 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
2836 a8083063 Iustin Pop
2837 5bbd3f7f Michael Hanselmann
    # checks reachability
2838 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2839 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
2840 a8083063 Iustin Pop
2841 a8083063 Iustin Pop
    if not newbie_singlehomed:
2842 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
2843 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2844 b15d625f Iustin Pop
                           source=myself.secondary_ip):
2845 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2846 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
2847 a8083063 Iustin Pop
2848 a8ae3eb5 Iustin Pop
    if self.op.readd:
2849 a8ae3eb5 Iustin Pop
      exceptions = [node]
2850 a8ae3eb5 Iustin Pop
    else:
2851 a8ae3eb5 Iustin Pop
      exceptions = []
2852 6d7e1f20 Guido Trotter
2853 6d7e1f20 Guido Trotter
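    # _DecideSelfPromotion (a helper defined elsewhere in this module)
    # decides whether the node should become a master candidate, e.g. when
    # the candidate pool is currently undersized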
    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
2854 0fff97e9 Guido Trotter
2855 a8ae3eb5 Iustin Pop
    if self.op.readd:
2856 a8ae3eb5 Iustin Pop
      self.new_node = self.cfg.GetNodeInfo(node)
2857 a8ae3eb5 Iustin Pop
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
2858 a8ae3eb5 Iustin Pop
    else:
2859 a8ae3eb5 Iustin Pop
      self.new_node = objects.Node(name=node,
2860 a8ae3eb5 Iustin Pop
                                   primary_ip=primary_ip,
2861 a8ae3eb5 Iustin Pop
                                   secondary_ip=secondary_ip,
2862 a8ae3eb5 Iustin Pop
                                   master_candidate=self.master_candidate,
2863 a8ae3eb5 Iustin Pop
                                   offline=False, drained=False)
2864 a8083063 Iustin Pop
2865 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2866 a8083063 Iustin Pop
    """Adds the new node to the cluster.
2867 a8083063 Iustin Pop

2868 a8083063 Iustin Pop
    """
2869 a8083063 Iustin Pop
    new_node = self.new_node
2870 a8083063 Iustin Pop
    node = new_node.name
2871 a8083063 Iustin Pop
2872 a8ae3eb5 Iustin Pop
    # for re-adds, reset the offline/drained/master-candidate flags;
2873 a8ae3eb5 Iustin Pop
    # we need to reset here, otherwise offline would prevent RPC calls
2874 a8ae3eb5 Iustin Pop
    # later in the procedure; this also means that if the re-add
2875 a8ae3eb5 Iustin Pop
    # fails, we are left with a non-offlined, broken node
2876 a8ae3eb5 Iustin Pop
    if self.op.readd:
2877 a8ae3eb5 Iustin Pop
      new_node.drained = new_node.offline = False
2878 a8ae3eb5 Iustin Pop
      self.LogInfo("Readding a node, the offline/drained flags were reset")
2879 a8ae3eb5 Iustin Pop
      # if we demote the node, we do cleanup later in the procedure
2880 a8ae3eb5 Iustin Pop
      new_node.master_candidate = self.master_candidate
2881 a8ae3eb5 Iustin Pop
2882 a8ae3eb5 Iustin Pop
    # notify the user about any possible mc promotion
2883 a8ae3eb5 Iustin Pop
    if new_node.master_candidate:
2884 a8ae3eb5 Iustin Pop
      self.LogInfo("Node will be a master candidate")
2885 a8ae3eb5 Iustin Pop
2886 a8083063 Iustin Pop
    # check connectivity
2887 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
2888 4c4e4e1e Iustin Pop
    result.Raise("Can't get version information from node %s" % node)
2889 90b54c26 Iustin Pop
    if constants.PROTOCOL_VERSION == result.payload:
2890 90b54c26 Iustin Pop
      logging.info("Communication to node %s fine, sw version %s match",
2891 90b54c26 Iustin Pop
                   node, result.payload)
2892 a8083063 Iustin Pop
    else:
2893 90b54c26 Iustin Pop
      raise errors.OpExecError("Version mismatch master version %s,"
2894 90b54c26 Iustin Pop
                               " node version %s" %
2895 90b54c26 Iustin Pop
                               (constants.PROTOCOL_VERSION, result.payload))
2896 a8083063 Iustin Pop
2897 a8083063 Iustin Pop
    # setup ssh on node
2898 9a4f63d1 Iustin Pop
    logging.info("Copy ssh key to node %s", node)
2899 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2900 a8083063 Iustin Pop
    keyarray = []
2901 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2902 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2903 70d9e3d8 Iustin Pop
                priv_key, pub_key]
2904 a8083063 Iustin Pop
2905 a8083063 Iustin Pop
    for i in keyfiles:
2906 13998ef2 Michael Hanselmann
      keyarray.append(utils.ReadFile(i))
2907 a8083063 Iustin Pop
2908 72737a7f Iustin Pop
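    # transfer the host SSH key pairs (DSA and RSA) and the key pair of the
    # user Ganeti runs as, all read above, to the new node in one RPC call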
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2909 72737a7f Iustin Pop
                                    keyarray[2],
2910 72737a7f Iustin Pop
                                    keyarray[3], keyarray[4], keyarray[5])
2911 4c4e4e1e Iustin Pop
    result.Raise("Cannot transfer ssh keys to the new node")
2912 a8083063 Iustin Pop
2913 a8083063 Iustin Pop
    # Add the new node to our /etc/hosts, if the cluster manages that file
2914 b86a6bcd Guido Trotter
    if self.cfg.GetClusterInfo().modify_etc_hosts:
2915 b86a6bcd Guido Trotter
      utils.AddHostToEtcHosts(new_node.name)
2916 c8a0948f Michael Hanselmann
2917 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
2918 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
2919 781de953 Iustin Pop
                                                 new_node.secondary_ip)
2920 4c4e4e1e Iustin Pop
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
2921 4c4e4e1e Iustin Pop
                   prereq=True)
2922 c2fc8250 Iustin Pop
      if not result.payload:
2923 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2924 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
2925 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
2926 a8083063 Iustin Pop
2927 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
2928 5c0527ed Guido Trotter
    node_verify_param = {
2929 f60759f7 Iustin Pop
      constants.NV_NODELIST: [node],
2930 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
2931 5c0527ed Guido Trotter
    }
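    # ask the master node to verify that it can reach the new node
    # (ssh/hostname check); any failure reported below aborts the addition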
2932 5c0527ed Guido Trotter
2933 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2934 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
2935 5c0527ed Guido Trotter
    for verifier in node_verify_list:
2936 4c4e4e1e Iustin Pop
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
2937 f60759f7 Iustin Pop
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
2938 6f68a739 Iustin Pop
      if nl_payload:
2939 6f68a739 Iustin Pop
        for failed in nl_payload:
2940 31821208 Iustin Pop
          feedback_fn("ssh/hostname verification failed"
2941 31821208 Iustin Pop
                      " (checking from %s): %s" %
2942 6f68a739 Iustin Pop
                      (verifier, nl_payload[failed]))
2943 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
2944 ff98055b Iustin Pop
2945 d8470559 Michael Hanselmann
    if self.op.readd:
2946 28eddce5 Guido Trotter
      _RedistributeAncillaryFiles(self)
2947 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
2948 a8ae3eb5 Iustin Pop
      # make sure we redistribute the config
2949 a8ae3eb5 Iustin Pop
      self.cfg.Update(new_node)
2950 a8ae3eb5 Iustin Pop
      # and make sure the new node will not have old files around
2951 a8ae3eb5 Iustin Pop
      if not new_node.master_candidate:
2952 a8ae3eb5 Iustin Pop
        result = self.rpc.call_node_demote_from_mc(new_node.name)
2953 3cebe102 Michael Hanselmann
        msg = result.fail_msg
2954 a8ae3eb5 Iustin Pop
        if msg:
2955 a8ae3eb5 Iustin Pop
          self.LogWarning("Node failed to demote itself from master"
2956 a8ae3eb5 Iustin Pop
                          " candidate status: %s" % msg)
2957 d8470559 Michael Hanselmann
    else:
2958 035566e3 Iustin Pop
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
2959 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
2960 a8083063 Iustin Pop
2961 a8083063 Iustin Pop
2962 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
2963 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
2964 b31c8676 Iustin Pop

2965 b31c8676 Iustin Pop
  """
2966 b31c8676 Iustin Pop
  HPATH = "node-modify"
2967 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2968 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
2969 b31c8676 Iustin Pop
  REQ_BGL = False
2970 b31c8676 Iustin Pop
2971 b31c8676 Iustin Pop
  def CheckArguments(self):
2972 b31c8676 Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2973 b31c8676 Iustin Pop
    if node_name is None:
2974 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2975 b31c8676 Iustin Pop
    self.op.node_name = node_name
2976 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'master_candidate')
2977 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'offline')
2978 c9d443ea Iustin Pop
    _CheckBooleanOpField(self.op, 'drained')
2979 c9d443ea Iustin Pop
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
2980 c9d443ea Iustin Pop
    if all_mods.count(None) == 3:
2981 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification")
2982 c9d443ea Iustin Pop
    if all_mods.count(True) > 1:
2983 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Can't set the node into more than one"
2984 c9d443ea Iustin Pop
                                 " state at the same time")
2985 b31c8676 Iustin Pop
2986 b31c8676 Iustin Pop
  def ExpandNames(self):
2987 b31c8676 Iustin Pop
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
2988 b31c8676 Iustin Pop
2989 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
2990 b31c8676 Iustin Pop
    """Build hooks env.
2991 b31c8676 Iustin Pop

2992 b31c8676 Iustin Pop
    This runs on the master node.
2993 b31c8676 Iustin Pop

2994 b31c8676 Iustin Pop
    """
2995 b31c8676 Iustin Pop
    env = {
2996 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
2997 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
2998 3a5ba66a Iustin Pop
      "OFFLINE": str(self.op.offline),
2999 c9d443ea Iustin Pop
      "DRAINED": str(self.op.drained),
3000 b31c8676 Iustin Pop
      }
3001 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
3002 b31c8676 Iustin Pop
          self.op.node_name]
3003 b31c8676 Iustin Pop
    return env, nl, nl
3004 b31c8676 Iustin Pop
3005 b31c8676 Iustin Pop
  def CheckPrereq(self):
3006 b31c8676 Iustin Pop
    """Check prerequisites.
3007 b31c8676 Iustin Pop

3008 b31c8676 Iustin Pop
    This checks the requested flag changes against the node's current
    state and the cluster's master candidate pool.
3009 b31c8676 Iustin Pop

3010 b31c8676 Iustin Pop
    """
3011 3a5ba66a Iustin Pop
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
3012 b31c8676 Iustin Pop
3013 97c61d46 Iustin Pop
    if (self.op.master_candidate is not None or
3014 97c61d46 Iustin Pop
        self.op.drained is not None or
3015 97c61d46 Iustin Pop
        self.op.offline is not None):
3016 97c61d46 Iustin Pop
      # we can't change the master's node flags
3017 97c61d46 Iustin Pop
      if self.op.node_name == self.cfg.GetMasterNode():
3018 97c61d46 Iustin Pop
        raise errors.OpPrereqError("The master role can be changed"
3019 97c61d46 Iustin Pop
                                   " only via masterfailover")
3020 97c61d46 Iustin Pop
3021 8fbf5ac7 Guido Trotter
    # Boolean value that tells us whether we're offlining or draining the node
3022 8fbf5ac7 Guido Trotter
    offline_or_drain = self.op.offline == True or self.op.drained == True
3023 3d9eb52b Guido Trotter
    deoffline_or_drain = self.op.offline == False or self.op.drained == False
3024 8fbf5ac7 Guido Trotter
3025 8fbf5ac7 Guido Trotter
    if (node.master_candidate and
3026 8fbf5ac7 Guido Trotter
        (self.op.master_candidate == False or offline_or_drain)):
3027 3e83dd48 Iustin Pop
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
3028 8fbf5ac7 Guido Trotter
      mc_now, mc_should, mc_max = self.cfg.GetMasterCandidateStats()
3029 8fbf5ac7 Guido Trotter
      if mc_now <= cp_size:
3030 3e83dd48 Iustin Pop
        msg = ("Not enough master candidates (desired"
3031 8fbf5ac7 Guido Trotter
               " %d, new value will be %d)" % (cp_size, mc_now-1))
3032 8fbf5ac7 Guido Trotter
        # Only allow forcing the operation if it's an offline/drain operation,
3033 8fbf5ac7 Guido Trotter
        # and we could not possibly promote more nodes.
3034 8fbf5ac7 Guido Trotter
        # FIXME: this can still lead to issues if in any way another node which
3035 8fbf5ac7 Guido Trotter
        # could be promoted appears in the meantime.
3036 8fbf5ac7 Guido Trotter
        if self.op.force and offline_or_drain and mc_should == mc_max:
3037 3e83dd48 Iustin Pop
          self.LogWarning(msg)
3038 3e83dd48 Iustin Pop
        else:
3039 3e83dd48 Iustin Pop
          raise errors.OpPrereqError(msg)
3040 3e83dd48 Iustin Pop
3041 c9d443ea Iustin Pop
    if (self.op.master_candidate == True and
3042 c9d443ea Iustin Pop
        ((node.offline and not self.op.offline == False) or
3043 c9d443ea Iustin Pop
         (node.drained and not self.op.drained == False))):
3044 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
3045 949bdabe Iustin Pop
                                 " to master_candidate" % node.name)
3046 3a5ba66a Iustin Pop
3047 3d9eb52b Guido Trotter
    # If the offline or drained flag is being cleared, promote the node
    # to master candidate if needed
3048 3d9eb52b Guido Trotter
    if (deoffline_or_drain and not offline_or_drain and not
3049 3d9eb52b Guido Trotter
        self.op.master_candidate == True):
3050 3d9eb52b Guido Trotter
      self.op.master_candidate = _DecideSelfPromotion(self)
3051 3d9eb52b Guido Trotter
      if self.op.master_candidate:
3052 3d9eb52b Guido Trotter
        self.LogInfo("Autopromoting node to master candidate")
3053 3d9eb52b Guido Trotter
3054 b31c8676 Iustin Pop
    return
3055 b31c8676 Iustin Pop
3056 b31c8676 Iustin Pop
  def Exec(self, feedback_fn):
3057 b31c8676 Iustin Pop
    """Modifies a node.
3058 b31c8676 Iustin Pop

3059 b31c8676 Iustin Pop
    """
3060 3a5ba66a Iustin Pop
    node = self.node
3061 b31c8676 Iustin Pop
3062 b31c8676 Iustin Pop
    result = []
3063 c9d443ea Iustin Pop
    changed_mc = False
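    # changed_mc tracks whether the master candidate status was modified;
    # if so, the node is re-added to the context at the end so that job
    # queue propagation or cleanup is triggered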
3064 b31c8676 Iustin Pop
3065 3a5ba66a Iustin Pop
    if self.op.offline is not None:
3066 3a5ba66a Iustin Pop
      node.offline = self.op.offline
3067 3a5ba66a Iustin Pop
      result.append(("offline", str(self.op.offline)))
3068 c9d443ea Iustin Pop
      if self.op.offline == True:
3069 c9d443ea Iustin Pop
        if node.master_candidate:
3070 c9d443ea Iustin Pop
          node.master_candidate = False
3071 c9d443ea Iustin Pop
          changed_mc = True
3072 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to offline"))
3073 c9d443ea Iustin Pop
        if node.drained:
3074 c9d443ea Iustin Pop
          node.drained = False
3075 c9d443ea Iustin Pop
          result.append(("drained", "clear drained status due to offline"))
3076 3a5ba66a Iustin Pop
3077 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
3078 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
3079 c9d443ea Iustin Pop
      changed_mc = True
3080 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
3081 56aa9fd5 Iustin Pop
      if self.op.master_candidate == False:
3082 56aa9fd5 Iustin Pop
        rrc = self.rpc.call_node_demote_from_mc(node.name)
3083 4c4e4e1e Iustin Pop
        msg = rrc.fail_msg
3084 0959c824 Iustin Pop
        if msg:
3085 0959c824 Iustin Pop
          self.LogWarning("Node failed to demote itself: %s" % msg)
3086 b31c8676 Iustin Pop
3087 c9d443ea Iustin Pop
    if self.op.drained is not None:
3088 c9d443ea Iustin Pop
      node.drained = self.op.drained
3089 82e12743 Iustin Pop
      result.append(("drained", str(self.op.drained)))
3090 c9d443ea Iustin Pop
      if self.op.drained == True:
3091 c9d443ea Iustin Pop
        if node.master_candidate:
3092 c9d443ea Iustin Pop
          node.master_candidate = False
3093 c9d443ea Iustin Pop
          changed_mc = True
3094 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to drain"))
3095 dec0d9da Iustin Pop
          rrc = self.rpc.call_node_demote_from_mc(node.name)
3096 3cebe102 Michael Hanselmann
          msg = rrc.fail_msg
3097 dec0d9da Iustin Pop
          if msg:
3098 dec0d9da Iustin Pop
            self.LogWarning("Node failed to demote itself: %s" % msg)
3099 c9d443ea Iustin Pop
        if node.offline:
3100 c9d443ea Iustin Pop
          node.offline = False
3101 c9d443ea Iustin Pop
          result.append(("offline", "clear offline status due to drain"))
3102 c9d443ea Iustin Pop
3103 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
3104 b31c8676 Iustin Pop
    self.cfg.Update(node)
3105 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
3106 c9d443ea Iustin Pop
    if changed_mc:
3107 3a26773f Iustin Pop
      self.context.ReaddNode(node)
3108 b31c8676 Iustin Pop
3109 b31c8676 Iustin Pop
    return result
3110 b31c8676 Iustin Pop
3111 b31c8676 Iustin Pop
3112 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
3113 f5118ade Iustin Pop
  """Powercycles a node.
3114 f5118ade Iustin Pop

3115 f5118ade Iustin Pop
  """
3116 f5118ade Iustin Pop
  _OP_REQP = ["node_name", "force"]
3117 f5118ade Iustin Pop
  REQ_BGL = False
3118 f5118ade Iustin Pop
3119 f5118ade Iustin Pop
  def CheckArguments(self):
3120 f5118ade Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
3121 f5118ade Iustin Pop
    if node_name is None:
3122 f5118ade Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
3123 f5118ade Iustin Pop
    self.op.node_name = node_name
3124 f5118ade Iustin Pop
    if node_name == self.cfg.GetMasterNode() and not self.op.force:
3125 f5118ade Iustin Pop
      raise errors.OpPrereqError("The node is the master and the force"
3126 f5118ade Iustin Pop
                                 " parameter was not set")
3127 f5118ade Iustin Pop
3128 f5118ade Iustin Pop
  def ExpandNames(self):
3129 f5118ade Iustin Pop
    """Locking for PowercycleNode.
3130 f5118ade Iustin Pop

3131 efb8da02 Michael Hanselmann
    This is a last-resort option and shouldn't block on other
3132 f5118ade Iustin Pop
    jobs. Therefore, we grab no locks.
3133 f5118ade Iustin Pop

3134 f5118ade Iustin Pop
    """
3135 f5118ade Iustin Pop
    self.needed_locks = {}
3136 f5118ade Iustin Pop
3137 f5118ade Iustin Pop
  def CheckPrereq(self):
3138 f5118ade Iustin Pop
    """Check prerequisites.
3139 f5118ade Iustin Pop

3140 f5118ade Iustin Pop
    This LU has no prereqs.
3141 f5118ade Iustin Pop

3142 f5118ade Iustin Pop
    """
3143 f5118ade Iustin Pop
    pass
3144 f5118ade Iustin Pop
3145 f5118ade Iustin Pop
  def Exec(self, feedback_fn):
3146 f5118ade Iustin Pop
    """Reboots a node.
3147 f5118ade Iustin Pop

3148 f5118ade Iustin Pop
    """
3149 f5118ade Iustin Pop
    result = self.rpc.call_node_powercycle(self.op.node_name,
3150 f5118ade Iustin Pop
                                           self.cfg.GetHypervisorType())
3151 4c4e4e1e Iustin Pop
    result.Raise("Failed to schedule the reboot")
3152 f5118ade Iustin Pop
    return result.payload
3153 f5118ade Iustin Pop
3154 f5118ade Iustin Pop
3155 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
3156 a8083063 Iustin Pop
  """Query cluster configuration.
3157 a8083063 Iustin Pop

3158 a8083063 Iustin Pop
  """
3159 a8083063 Iustin Pop
  _OP_REQP = []
3160 642339cf Guido Trotter
  REQ_BGL = False
3161 642339cf Guido Trotter
3162 642339cf Guido Trotter
  def ExpandNames(self):
3163 642339cf Guido Trotter
    self.needed_locks = {}
3164 a8083063 Iustin Pop
3165 a8083063 Iustin Pop
  def CheckPrereq(self):
3166 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
3167 a8083063 Iustin Pop

3168 a8083063 Iustin Pop
    """
3169 a8083063 Iustin Pop
    pass
3170 a8083063 Iustin Pop
3171 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3172 a8083063 Iustin Pop
    """Return cluster config.
3173 a8083063 Iustin Pop

3174 a8083063 Iustin Pop
    """
3175 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
3176 a8083063 Iustin Pop
    result = {
3177 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
3178 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
3179 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
3180 d1a7d66f Guido Trotter
      "os_api_version": max(constants.OS_API_VERSIONS),
3181 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
3182 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
3183 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
3184 469f88e1 Iustin Pop
      "master": cluster.master_node,
3185 066f465d Guido Trotter
      "default_hypervisor": cluster.enabled_hypervisors[0],
3186 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
3187 b8810fec Michael Hanselmann
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
3188 7c4d6c7b Michael Hanselmann
                        for hypervisor_name in cluster.enabled_hypervisors]),
3189 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
3190 1094acda Guido Trotter
      "nicparams": cluster.nicparams,
3191 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
3192 7a56b411 Guido Trotter
      "master_netdev": cluster.master_netdev,
3193 7a56b411 Guido Trotter
      "volume_group_name": cluster.volume_group_name,
3194 7a56b411 Guido Trotter
      "file_storage_dir": cluster.file_storage_dir,
3195 90f72445 Iustin Pop
      "ctime": cluster.ctime,
3196 90f72445 Iustin Pop
      "mtime": cluster.mtime,
3197 259578eb Iustin Pop
      "uuid": cluster.uuid,
3198 c118d1f4 Michael Hanselmann
      "tags": list(cluster.GetTags()),
3199 a8083063 Iustin Pop
      }
3200 a8083063 Iustin Pop
3201 a8083063 Iustin Pop
    return result
3202 a8083063 Iustin Pop
3203 a8083063 Iustin Pop
3204 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
3205 ae5849b5 Michael Hanselmann
  """Return configuration values.
3206 a8083063 Iustin Pop

3207 a8083063 Iustin Pop
  """
3208 a8083063 Iustin Pop
  _OP_REQP = []
3209 642339cf Guido Trotter
  REQ_BGL = False
3210 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
3211 05e50653 Michael Hanselmann
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
3212 05e50653 Michael Hanselmann
                                  "watcher_pause")
3213 642339cf Guido Trotter
3214 642339cf Guido Trotter
  def ExpandNames(self):
3215 642339cf Guido Trotter
    self.needed_locks = {}
3216 a8083063 Iustin Pop
3217 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3218 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3219 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
3220 ae5849b5 Michael Hanselmann
3221 a8083063 Iustin Pop
  def CheckPrereq(self):
3222 a8083063 Iustin Pop
    """No prerequisites.
3223 a8083063 Iustin Pop

3224 a8083063 Iustin Pop
    """
3225 a8083063 Iustin Pop
    pass
3226 a8083063 Iustin Pop
3227 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3228 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
3229 a8083063 Iustin Pop

3230 a8083063 Iustin Pop
    """
3231 ae5849b5 Michael Hanselmann
    values = []
3232 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
3233 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
3234 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
3235 ae5849b5 Michael Hanselmann
      elif field == "master_node":
3236 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
3237 3ccafd0e Iustin Pop
      elif field == "drain_flag":
3238 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
3239 05e50653 Michael Hanselmann
      elif field == "watcher_pause":
3240 05e50653 Michael Hanselmann
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
3241 ae5849b5 Michael Hanselmann
      else:
3242 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
3243 3ccafd0e Iustin Pop
      values.append(entry)
3244 ae5849b5 Michael Hanselmann
    return values
3245 a8083063 Iustin Pop
3246 a8083063 Iustin Pop
3247 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
3248 a8083063 Iustin Pop
  """Bring up an instance's disks.
3249 a8083063 Iustin Pop

3250 a8083063 Iustin Pop
  """
3251 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3252 f22a8ba3 Guido Trotter
  REQ_BGL = False
3253 f22a8ba3 Guido Trotter
3254 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3255 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3256 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3257 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3258 f22a8ba3 Guido Trotter
3259 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3260 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3261 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3262 a8083063 Iustin Pop
3263 a8083063 Iustin Pop
  def CheckPrereq(self):
3264 a8083063 Iustin Pop
    """Check prerequisites.
3265 a8083063 Iustin Pop

3266 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3267 a8083063 Iustin Pop

3268 a8083063 Iustin Pop
    """
3269 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3270 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3271 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3272 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3273 b4ec07f8 Iustin Pop
    if not hasattr(self.op, "ignore_size"):
3274 b4ec07f8 Iustin Pop
      self.op.ignore_size = False
3275 a8083063 Iustin Pop
3276 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3277 a8083063 Iustin Pop
    """Activate the disks.
3278 a8083063 Iustin Pop

3279 a8083063 Iustin Pop
    """
3280 b4ec07f8 Iustin Pop
    disks_ok, disks_info = \
3281 b4ec07f8 Iustin Pop
              _AssembleInstanceDisks(self, self.instance,
3282 b4ec07f8 Iustin Pop
                                     ignore_size=self.op.ignore_size)
3283 a8083063 Iustin Pop
    if not disks_ok:
3284 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
3285 a8083063 Iustin Pop
3286 a8083063 Iustin Pop
    return disks_info
3287 a8083063 Iustin Pop
3288 a8083063 Iustin Pop
3289 e3443b36 Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
3290 e3443b36 Iustin Pop
                           ignore_size=False):
3291 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
3292 a8083063 Iustin Pop

3293 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
3294 a8083063 Iustin Pop

3295 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3296 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3297 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3298 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
3299 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
3300 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
3301 e4376078 Iustin Pop
      won't result in an error return from the function
3302 e3443b36 Iustin Pop
  @type ignore_size: boolean
3303 e3443b36 Iustin Pop
  @param ignore_size: if true, the current known size of the disk
3304 e3443b36 Iustin Pop
      will not be used during the disk activation, useful for cases
3305 e3443b36 Iustin Pop
      when the size is wrong
3306 e4376078 Iustin Pop
  @return: a (disks_ok, device_info) tuple; device_info is a list of
3307 e4376078 Iustin Pop
      (host, instance_visible_name, node_visible_name)
3308 e4376078 Iustin Pop
      with the mapping from node devices to instance devices
3309 a8083063 Iustin Pop

3310 a8083063 Iustin Pop
  """
3311 a8083063 Iustin Pop
  device_info = []
3312 a8083063 Iustin Pop
  disks_ok = True
3313 fdbd668d Iustin Pop
  iname = instance.name
3314 fdbd668d Iustin Pop
  # With the two-pass mechanism we try to reduce the window of
3315 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
3316 fdbd668d Iustin Pop
  # before handshaking has occurred, but we do not eliminate it
3317 fdbd668d Iustin Pop
3318 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
3319 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
3320 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
3321 fdbd668d Iustin Pop
  # SyncSource, etc.)
3322 fdbd668d Iustin Pop
3323 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
3324 a8083063 Iustin Pop
  for inst_disk in instance.disks:
3325 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3326 e3443b36 Iustin Pop
      if ignore_size:
3327 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3328 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3329 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3330 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
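      # the last argument selects the assembly mode: False here (secondary,
      # pass 1), True in the primary-only second pass below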
3331 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3332 53c14ef1 Iustin Pop
      if msg:
3333 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3334 53c14ef1 Iustin Pop
                           " (is_primary=False, pass=1): %s",
3335 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3336 fdbd668d Iustin Pop
        if not ignore_secondaries:
3337 a8083063 Iustin Pop
          disks_ok = False
3338 fdbd668d Iustin Pop
3339 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
3340 fdbd668d Iustin Pop
3341 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
3342 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
3343 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3344 fdbd668d Iustin Pop
      if node != instance.primary_node:
3345 fdbd668d Iustin Pop
        continue
3346 e3443b36 Iustin Pop
      if ignore_size:
3347 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3348 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3349 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3350 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
3351 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3352 53c14ef1 Iustin Pop
      if msg:
3353 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3354 53c14ef1 Iustin Pop
                           " (is_primary=True, pass=2): %s",
3355 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3356 fdbd668d Iustin Pop
        disks_ok = False
3357 1dff8e07 Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name,
3358 1dff8e07 Iustin Pop
                        result.payload))
3359 a8083063 Iustin Pop
3360 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
3361 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
3362 b352ab5b Iustin Pop
  # improving the logical/physical id handling
3363 b352ab5b Iustin Pop
  for disk in instance.disks:
3364 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
3365 b352ab5b Iustin Pop
3366 a8083063 Iustin Pop
  return disks_ok, device_info
3367 a8083063 Iustin Pop
3368 a8083063 Iustin Pop
3369 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
3370 3ecf6786 Iustin Pop
  """Start the disks of an instance.
3371 3ecf6786 Iustin Pop

3372 3ecf6786 Iustin Pop
  """
3373 7c4d6c7b Michael Hanselmann
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
3374 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
3375 fe7b0351 Michael Hanselmann
  if not disks_ok:
3376 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
3377 fe7b0351 Michael Hanselmann
    if force is not None and not force:
3378 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
3379 86d9d3bb Iustin Pop
                         " secondary node,"
3380 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
3381 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
3382 fe7b0351 Michael Hanselmann
3383 fe7b0351 Michael Hanselmann
3384 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
3385 a8083063 Iustin Pop
  """Shutdown an instance's disks.
3386 a8083063 Iustin Pop

3387 a8083063 Iustin Pop
  """
3388 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3389 f22a8ba3 Guido Trotter
  REQ_BGL = False
3390 f22a8ba3 Guido Trotter
3391 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3392 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3393 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3394 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3395 f22a8ba3 Guido Trotter
3396 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3397 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3398 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3399 a8083063 Iustin Pop
3400 a8083063 Iustin Pop
  def CheckPrereq(self):
3401 a8083063 Iustin Pop
    """Check prerequisites.
3402 a8083063 Iustin Pop

3403 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3404 a8083063 Iustin Pop

3405 a8083063 Iustin Pop
    """
3406 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3407 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3408 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3409 a8083063 Iustin Pop
3410 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3411 a8083063 Iustin Pop
    """Deactivate the disks
3412 a8083063 Iustin Pop

3413 a8083063 Iustin Pop
    """
3414 a8083063 Iustin Pop
    instance = self.instance
3415 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
3416 a8083063 Iustin Pop
3417 a8083063 Iustin Pop
3418 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
3419 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
3420 155d6c75 Guido Trotter

3421 155d6c75 Guido Trotter
  This function checks if an instance is running, before calling
3422 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
3423 155d6c75 Guido Trotter

3424 155d6c75 Guido Trotter
  """
3425 aca13712 Iustin Pop
  pnode = instance.primary_node
3426 4c4e4e1e Iustin Pop
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
3427 4c4e4e1e Iustin Pop
  ins_l.Raise("Can't contact node %s" % pnode)
3428 aca13712 Iustin Pop
3429 aca13712 Iustin Pop
  if instance.name in ins_l.payload:
3430 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
3431 155d6c75 Guido Trotter
                             " block devices.")
3432 155d6c75 Guido Trotter
3433 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
3434 a8083063 Iustin Pop
3435 a8083063 Iustin Pop
3436 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
3437 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
3438 a8083063 Iustin Pop

3439 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
3440 a8083063 Iustin Pop

3441 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
3442 a8083063 Iustin Pop
  ignored.
3443 a8083063 Iustin Pop

3444 a8083063 Iustin Pop
  """
3445 cacfd1fd Iustin Pop
  all_result = True
3446 a8083063 Iustin Pop
  for disk in instance.disks:
3447 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
3448 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
3449 781de953 Iustin Pop
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
3450 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3451 cacfd1fd Iustin Pop
      if msg:
3452 cacfd1fd Iustin Pop
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
3453 cacfd1fd Iustin Pop
                      disk.iv_name, node, msg)
3454 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
3455 cacfd1fd Iustin Pop
          all_result = False
3456 cacfd1fd Iustin Pop
  return all_result
3457 a8083063 Iustin Pop
3458 a8083063 Iustin Pop
3459 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
3460 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
3461 d4f16fd9 Iustin Pop

3462 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
3463 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
3464 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
3465 d4f16fd9 Iustin Pop
  exception.
3466 d4f16fd9 Iustin Pop

3467 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
3468 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
3469 e69d05fd Iustin Pop
  @type node: C{str}
3470 e69d05fd Iustin Pop
  @param node: the node to check
3471 e69d05fd Iustin Pop
  @type reason: C{str}
3472 e69d05fd Iustin Pop
  @param reason: string to use in the error message
3473 e69d05fd Iustin Pop
  @type requested: C{int}
3474 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
3475 9ca87a96 Iustin Pop
  @type hypervisor_name: C{str}
3476 9ca87a96 Iustin Pop
  @param hypervisor_name: the hypervisor to ask for memory stats
3477 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
3478 e69d05fd Iustin Pop
      we cannot check the node
3479 d4f16fd9 Iustin Pop

3480 d4f16fd9 Iustin Pop
  """
3481 9ca87a96 Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
3482 4c4e4e1e Iustin Pop
  nodeinfo[node].Raise("Can't get data from node %s" % node, prereq=True)
3483 070e998b Iustin Pop
  free_mem = nodeinfo[node].payload.get('memory_free', None)
3484 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
3485 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
3486 070e998b Iustin Pop
                               " was '%s'" % (node, free_mem))
3487 d4f16fd9 Iustin Pop
  if requested > free_mem:
3488 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
3489 070e998b Iustin Pop
                               " needed %s MiB, available %s MiB" %
3490 070e998b Iustin Pop
                               (node, reason, requested, free_mem))
3491 d4f16fd9 Iustin Pop
3492 d4f16fd9 Iustin Pop
3493 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
3494 a8083063 Iustin Pop
  """Starts an instance.
3495 a8083063 Iustin Pop

3496 a8083063 Iustin Pop
  """
3497 a8083063 Iustin Pop
  HPATH = "instance-start"
3498 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3499 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
3500 e873317a Guido Trotter
  REQ_BGL = False
3501 e873317a Guido Trotter
3502 e873317a Guido Trotter
  def ExpandNames(self):
3503 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3504 a8083063 Iustin Pop
3505 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3506 a8083063 Iustin Pop
    """Build hooks env.
3507 a8083063 Iustin Pop

3508 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3509 a8083063 Iustin Pop

3510 a8083063 Iustin Pop
    """
3511 a8083063 Iustin Pop
    env = {
3512 a8083063 Iustin Pop
      "FORCE": self.op.force,
3513 a8083063 Iustin Pop
      }
3514 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3515 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3516 a8083063 Iustin Pop
    return env, nl, nl
3517 a8083063 Iustin Pop
3518 a8083063 Iustin Pop
  def CheckPrereq(self):
3519 a8083063 Iustin Pop
    """Check prerequisites.
3520 a8083063 Iustin Pop

3521 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3522 a8083063 Iustin Pop

3523 a8083063 Iustin Pop
    """
3524 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3525 e873317a Guido Trotter
    assert self.instance is not None, \
3526 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3527 a8083063 Iustin Pop
3528 d04aaa2f Iustin Pop
    # extra beparams
3529 d04aaa2f Iustin Pop
    self.beparams = getattr(self.op, "beparams", {})
3530 d04aaa2f Iustin Pop
    if self.beparams:
3531 d04aaa2f Iustin Pop
      if not isinstance(self.beparams, dict):
3532 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
3533 d04aaa2f Iustin Pop
                                   " dict" % (type(self.beparams), ))
3534 d04aaa2f Iustin Pop
      # fill the beparams dict
3535 d04aaa2f Iustin Pop
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
3536 d04aaa2f Iustin Pop
      self.op.beparams = self.beparams
3537 d04aaa2f Iustin Pop
3538 d04aaa2f Iustin Pop
    # extra hvparams
3539 d04aaa2f Iustin Pop
    self.hvparams = getattr(self.op, "hvparams", {})
3540 d04aaa2f Iustin Pop
    if self.hvparams:
3541 d04aaa2f Iustin Pop
      if not isinstance(self.hvparams, dict):
3542 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
3543 d04aaa2f Iustin Pop
                                   " dict" % (type(self.hvparams), ))
3544 d04aaa2f Iustin Pop
3545 d04aaa2f Iustin Pop
      # check hypervisor parameter syntax (locally)
3546 d04aaa2f Iustin Pop
      cluster = self.cfg.GetClusterInfo()
3547 d04aaa2f Iustin Pop
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
3548 abe609b2 Guido Trotter
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
3549 d04aaa2f Iustin Pop
                                    instance.hvparams)
3550 d04aaa2f Iustin Pop
      filled_hvp.update(self.hvparams)
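      # filled_hvp now holds the cluster defaults for this hypervisor,
      # overridden by the instance's hvparams and finally by the one-off
      # parameters passed with this startup request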
3551 d04aaa2f Iustin Pop
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
3552 d04aaa2f Iustin Pop
      hv_type.CheckParameterSyntax(filled_hvp)
3553 d04aaa2f Iustin Pop
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
3554 d04aaa2f Iustin Pop
      self.op.hvparams = self.hvparams
3555 d04aaa2f Iustin Pop
3556 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3557 7527a8a4 Iustin Pop
3558 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
3559 5bbd3f7f Michael Hanselmann
    # check bridges existence
3560 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
3561 a8083063 Iustin Pop
3562 f1926756 Guido Trotter
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3563 f1926756 Guido Trotter
                                              instance.name,
3564 f1926756 Guido Trotter
                                              instance.hypervisor)
3565 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3566 4c4e4e1e Iustin Pop
                      prereq=True)
3567 7ad1af4a Iustin Pop
    if not remote_info.payload: # not running already
3568 f1926756 Guido Trotter
      _CheckNodeFreeMemory(self, instance.primary_node,
3569 f1926756 Guido Trotter
                           "starting instance %s" % instance.name,
3570 f1926756 Guido Trotter
                           bep[constants.BE_MEMORY], instance.hypervisor)
3571 d4f16fd9 Iustin Pop
3572 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3573 a8083063 Iustin Pop
    """Start the instance.
3574 a8083063 Iustin Pop

3575 a8083063 Iustin Pop
    """
3576 a8083063 Iustin Pop
    instance = self.instance
3577 a8083063 Iustin Pop
    force = self.op.force
3578 a8083063 Iustin Pop
3579 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
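    # the instance is marked as up in the configuration first; if the
    # start below fails, the disks are shut down again and the error is
    # propagated to the caller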
3580 fe482621 Iustin Pop
3581 a8083063 Iustin Pop
    node_current = instance.primary_node
3582 a8083063 Iustin Pop
3583 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
3584 a8083063 Iustin Pop
3585 d04aaa2f Iustin Pop
    result = self.rpc.call_instance_start(node_current, instance,
3586 d04aaa2f Iustin Pop
                                          self.hvparams, self.beparams)
3587 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3588 dd279568 Iustin Pop
    if msg:
3589 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3590 dd279568 Iustin Pop
      raise errors.OpExecError("Could not start instance: %s" % msg)
3591 a8083063 Iustin Pop
3592 a8083063 Iustin Pop
3593 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
3594 bf6929a2 Alexander Schreiber
  """Reboot an instance.
3595 bf6929a2 Alexander Schreiber

3596 bf6929a2 Alexander Schreiber
  """
3597 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
3598 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
3599 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
3600 e873317a Guido Trotter
  REQ_BGL = False
3601 e873317a Guido Trotter
3602 e873317a Guido Trotter
  def ExpandNames(self):
3603 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
3604 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
3605 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
3606 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
3607 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
3608 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
3609 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
3610 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3611 bf6929a2 Alexander Schreiber
3612 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
3613 bf6929a2 Alexander Schreiber
    """Build hooks env.
3614 bf6929a2 Alexander Schreiber

3615 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
3616 bf6929a2 Alexander Schreiber

3617 bf6929a2 Alexander Schreiber
    """
3618 bf6929a2 Alexander Schreiber
    env = {
3619 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
3620 2c2690c9 Iustin Pop
      "REBOOT_TYPE": self.op.reboot_type,
3621 bf6929a2 Alexander Schreiber
      }
3622 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3623 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3624 bf6929a2 Alexander Schreiber
    return env, nl, nl
3625 bf6929a2 Alexander Schreiber
3626 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
3627 bf6929a2 Alexander Schreiber
    """Check prerequisites.
3628 bf6929a2 Alexander Schreiber

3629 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
3630 bf6929a2 Alexander Schreiber

3631 bf6929a2 Alexander Schreiber
    """
3632 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3633 e873317a Guido Trotter
    assert self.instance is not None, \
3634 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3635 bf6929a2 Alexander Schreiber
3636 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3637 7527a8a4 Iustin Pop
3638 5bbd3f7f Michael Hanselmann
    # check bridges existence
3639 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
3640 bf6929a2 Alexander Schreiber
3641 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
3642 bf6929a2 Alexander Schreiber
    """Reboot the instance.
3643 bf6929a2 Alexander Schreiber

3644 bf6929a2 Alexander Schreiber
    """
3645 bf6929a2 Alexander Schreiber
    instance = self.instance
3646 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
3647 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
3648 bf6929a2 Alexander Schreiber
3649 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
3650 bf6929a2 Alexander Schreiber
3651 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
3652 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
3653 ae48ac32 Iustin Pop
      for disk in instance.disks:
3654 ae48ac32 Iustin Pop
        self.cfg.SetDiskID(disk, node_current)
3655 781de953 Iustin Pop
      result = self.rpc.call_instance_reboot(node_current, instance,
3656 07813a9e Iustin Pop
                                             reboot_type)
3657 4c4e4e1e Iustin Pop
      result.Raise("Could not reboot instance")
3658 bf6929a2 Alexander Schreiber
    else:
3659 1fae010f Iustin Pop
      result = self.rpc.call_instance_shutdown(node_current, instance)
3660 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance for full reboot")
3661 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3662 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
3663 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(node_current, instance, None, None)
3664 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3665 dd279568 Iustin Pop
      if msg:
3666 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3667 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance for"
3668 dd279568 Iustin Pop
                                 " full reboot: %s" % msg)
3669 bf6929a2 Alexander Schreiber
3670 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
3671 bf6929a2 Alexander Schreiber
3672 bf6929a2 Alexander Schreiber
3673 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
3674 a8083063 Iustin Pop
  """Shutdown an instance.
3675 a8083063 Iustin Pop

3676 a8083063 Iustin Pop
  """
3677 a8083063 Iustin Pop
  HPATH = "instance-stop"
3678 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3679 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3680 e873317a Guido Trotter
  REQ_BGL = False
3681 e873317a Guido Trotter
3682 e873317a Guido Trotter
  def ExpandNames(self):
3683 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3684 a8083063 Iustin Pop
3685 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3686 a8083063 Iustin Pop
    """Build hooks env.
3687 a8083063 Iustin Pop

3688 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3689 a8083063 Iustin Pop

3690 a8083063 Iustin Pop
    """
3691 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3692 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3693 a8083063 Iustin Pop
    return env, nl, nl
3694 a8083063 Iustin Pop
3695 a8083063 Iustin Pop
  def CheckPrereq(self):
3696 a8083063 Iustin Pop
    """Check prerequisites.
3697 a8083063 Iustin Pop

3698 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3699 a8083063 Iustin Pop

3700 a8083063 Iustin Pop
    """
3701 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3702 e873317a Guido Trotter
    assert self.instance is not None, \
3703 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3704 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3705 a8083063 Iustin Pop
3706 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3707 a8083063 Iustin Pop
    """Shutdown the instance.
3708 a8083063 Iustin Pop

3709 a8083063 Iustin Pop
    """
3710 a8083063 Iustin Pop
    instance = self.instance
3711 a8083063 Iustin Pop
    node_current = instance.primary_node
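    # mark the instance as down in the configuration before contacting the
    # node; a failed shutdown RPC only logs a warning and the disks are
    # deactivated anyway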
3712 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
3713 781de953 Iustin Pop
    result = self.rpc.call_instance_shutdown(node_current, instance)
3714 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3715 1fae010f Iustin Pop
    if msg:
3716 1fae010f Iustin Pop
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
3717 a8083063 Iustin Pop
3718 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
3719 a8083063 Iustin Pop
3720 a8083063 Iustin Pop
3721 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
3722 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
3723 fe7b0351 Michael Hanselmann

3724 fe7b0351 Michael Hanselmann
  """
3725 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
3726 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
3727 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
3728 4e0b4d2d Guido Trotter
  REQ_BGL = False
3729 4e0b4d2d Guido Trotter
3730 4e0b4d2d Guido Trotter
  def ExpandNames(self):
3731 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
3732 fe7b0351 Michael Hanselmann
3733 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
3734 fe7b0351 Michael Hanselmann
    """Build hooks env.
3735 fe7b0351 Michael Hanselmann

3736 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
3737 fe7b0351 Michael Hanselmann

3738 fe7b0351 Michael Hanselmann
    """
3739 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3740 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3741 fe7b0351 Michael Hanselmann
    return env, nl, nl
3742 fe7b0351 Michael Hanselmann
3743 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
3744 fe7b0351 Michael Hanselmann
    """Check prerequisites.
3745 fe7b0351 Michael Hanselmann

3746 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
3747 fe7b0351 Michael Hanselmann

3748 fe7b0351 Michael Hanselmann
    """
3749 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3750 4e0b4d2d Guido Trotter
    assert instance is not None, \
3751 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3752 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3753 4e0b4d2d Guido Trotter
3754 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
3755 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
3756 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3757 0d68c45d Iustin Pop
    if instance.admin_up:
3758 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3759 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3760 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3761 72737a7f Iustin Pop
                                              instance.name,
3762 72737a7f Iustin Pop
                                              instance.hypervisor)
3763 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3764 4c4e4e1e Iustin Pop
                      prereq=True)
3765 7ad1af4a Iustin Pop
    if remote_info.payload:
3766 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3767 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
3768 3ecf6786 Iustin Pop
                                  instance.primary_node))
3769 d0834de3 Michael Hanselmann
3770 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
3771 f2c05717 Guido Trotter
    self.op.force_variant = getattr(self.op, "force_variant", False)
3772 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3773 d0834de3 Michael Hanselmann
      # OS verification
3774 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
3775 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
3776 d0834de3 Michael Hanselmann
      if pnode is None:
3777 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
3778 3ecf6786 Iustin Pop
                                   self.op.pnode)
3779 781de953 Iustin Pop
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
3780 4c4e4e1e Iustin Pop
      result.Raise("OS '%s' not in supported OS list for primary node %s" %
3781 4c4e4e1e Iustin Pop
                   (self.op.os_type, pnode.name), prereq=True)
3782 f2c05717 Guido Trotter
      if not self.op.force_variant:
3783 f2c05717 Guido Trotter
        _CheckOSVariant(result.payload, self.op.os_type)
3784 d0834de3 Michael Hanselmann
3785 fe7b0351 Michael Hanselmann
    self.instance = instance
3786 fe7b0351 Michael Hanselmann
3787 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
3788 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
3789 fe7b0351 Michael Hanselmann

3790 fe7b0351 Michael Hanselmann
    """
3791 fe7b0351 Michael Hanselmann
    inst = self.instance
3792 fe7b0351 Michael Hanselmann
3793 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3794 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
3795 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
3796 97abc79f Iustin Pop
      self.cfg.Update(inst)
3797 d0834de3 Michael Hanselmann
3798 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
3799 fe7b0351 Michael Hanselmann
    try:
3800 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
3801 e557bae9 Guido Trotter
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
3802 4c4e4e1e Iustin Pop
      result.Raise("Could not install OS for instance %s on node %s" %
3803 4c4e4e1e Iustin Pop
                   (inst.name, inst.primary_node))
3804 fe7b0351 Michael Hanselmann
    finally:
3805 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
3806 fe7b0351 Michael Hanselmann
3807 fe7b0351 Michael Hanselmann
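

# Illustrative sketch (assumption, not part of the original module): the LU
# above is normally driven through the opcode/CLI layer rather than being
# instantiated directly.  Assuming an opcodes.OpReinstallInstance opcode
# whose slots match this LU's _OP_REQP plus the optional "os_type" and
# "force_variant" attributes read via getattr() in CheckPrereq, a client-side
# submission could look like:
#
#   from ganeti import cli, opcodes
#   op = opcodes.OpReinstallInstance(instance_name="instance1.example.com",
#                                    os_type="debootstrap",
#                                    force_variant=False)
#   cli.SubmitOpCode(op)
#
# The exact opcode class and field names are assumptions for illustration.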


class LURecreateInstanceDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disks"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    if not isinstance(self.op.disks, list):
      raise errors.OpPrereqError("Invalid disks parameter")
    for item in self.op.disks:
      if (not isinstance(item, int) or
          item < 0):
        raise errors.OpPrereqError("Invalid disk specification '%s'" %
                                   str(item))

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))
    else:
      for idx in self.op.disks:
        if idx >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    to_skip = []
    for idx, disk in enumerate(self.instance.disks):
      if idx not in self.op.disks: # disk idx has not been passed in
        to_skip.append(idx)
        continue

    _CreateDisks(self, self.instance, to_skip=to_skip)
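

# Illustrative note (assumption, not from the original source): per
# CheckPrereq above, an empty "disks" list means "recreate every disk",
# while an explicit list limits recreation to those indices, e.g.:
#
#   disks=[]      -> to_skip stays empty, all disks are recreated
#   disks=[0, 2]  -> disk 1 is added to to_skip and left untouched
#
# _CreateDisks() then receives the skip list and only touches the rest.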


class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
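

# Illustrative note (assumption, not from the original source): the new name
# is resolved through utils.HostInfo(), so it must be resolvable, and unless
# the caller sets the optional "ignore_ip" flag the LU also refuses names
# whose IP already answers on the noded port, conceptually:
#
#   if utils.TcpPing("192.0.2.10", constants.DEFAULT_NODED_PORT):
#       ...  # address already in use -> OpPrereqError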


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
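

# Illustrative sketch (assumption, not part of the original module): with
# "ignore_failures" set, shutdown and disk-removal errors above are
# downgraded to warnings and the instance is still dropped from the
# configuration, e.g.:
#
#   op = opcodes.OpRemoveInstance(instance_name="instance1.example.com",
#                                 ignore_failures=True)
#
# The opcode class name is an assumption; the fields mirror _OP_REQP.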


class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
                    "serial_no", "ctime", "mtime", "uuid"]
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "nic_mode", "nic_link",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
                                    r"(nic)\.(bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "hvparams",
                                    ] + _SIMPLE_FIELDS +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")


  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        else:
          if result.payload:
            live_data.update(result.payload)
          # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    cluster = self.cfg.GetClusterInfo()
    for instance in instance_list:
      iout = []
      i_hv = cluster.FillHV(instance)
      i_be = cluster.FillBE(instance)
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
                                 nic.nicparams) for nic in instance.nics]
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field in self._SIMPLE_FIELDS:
          val = getattr(instance, field)
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "vcpus":
          val = i_be[constants.BE_VCPUS]
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          if instance.nics:
            val = instance.nics[0].ip
          else:
            val = None
        elif field == "nic_mode":
          if instance.nics:
            val = i_nicp[0][constants.NIC_MODE]
          else:
            val = None
        elif field == "nic_link":
          if instance.nics:
            val = i_nicp[0][constants.NIC_LINK]
          else:
            val = None
        elif field == "bridge":
          if (instance.nics and
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
            val = i_nicp[0][constants.NIC_LINK]
          else:
            val = None
        elif field == "mac":
          if instance.nics:
            val = instance.nics[0].mac
          else:
            val = None
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "modes":
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
            elif st_groups[1] == "links":
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
            elif st_groups[1] == "bridges":
              val = []
              for nicp in i_nicp:
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
                  val.append(nicp[constants.NIC_LINK])
                else:
                  val.append(None)
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "mode":
                  val = i_nicp[nic_idx][constants.NIC_MODE]
                elif st_groups[1] == "link":
                  val = i_nicp[nic_idx][constants.NIC_LINK]
                elif st_groups[1] == "bridge":
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
                  if nic_mode == constants.NIC_MODE_BRIDGED:
                    val = i_nicp[nic_idx][constants.NIC_LINK]
                  else:
                    val = None
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, ("Declared but unhandled variable parameter '%s'" %
                           field)
        else:
          assert False, "Declared but unhandled parameter '%s'" % field
        iout.append(val)
      output.append(iout)

    return output
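

# Illustrative note (assumption, not from the original source): the static
# field set above accepts both plain names and indexed/list forms, which the
# Exec() dispatch resolves per instance.  For example, a query such as
#
#   output_fields=["name", "status", "disk.count", "disk.size/0",
#                  "nic.macs", "be/memory"]
#
# yields one row per instance with, in order: the name, a status string like
# "running" or "ADMIN_down", the number of disks, the size of disk 0, the
# list of NIC MAC addresses and the effective "memory" backend parameter.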


class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       self.op.live, self.op.cleanup)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    env = _BuildInstanceHookEnvByObject(self, instance)
    env["MIGRATE_LIVE"] = self.op.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    return env, nl, nl


class LUMoveInstance(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = self.cfg.ExpandNodeName(self.op.target_node)
    if target_node is None:
      raise errors.OpPrereqError("Node '%s' not known" %
                                  self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
                                       self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node))

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx)

    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)

    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, disk,
                                               instance.name, True)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, disk,
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
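

# Illustrative sketch (assumption, not part of the original module): "move"
# only works for plain LVM or file-based disks (LD_LV/LD_FILE) and copies the
# data while the instance is shut down, so it is the slow path compared to
# failover or migration, e.g.:
#
#   op = opcodes.OpMoveInstance(instance_name="instance1.example.com",
#                               target_node="node3.example.com")
#
# The opcode class name is an assumption; the fields mirror _OP_REQP.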
4690 80cb875c Michael Hanselmann
class LUMigrateNode(LogicalUnit):
4691 80cb875c Michael Hanselmann
  """Migrate all instances from a node.
4692 80cb875c Michael Hanselmann

4693 80cb875c Michael Hanselmann
  """
4694 80cb875c Michael Hanselmann
  HPATH = "node-migrate"
4695 80cb875c Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
4696 80cb875c Michael Hanselmann
  _OP_REQP = ["node_name", "live"]
4697 80cb875c Michael Hanselmann
  REQ_BGL = False
4698 80cb875c Michael Hanselmann
4699 80cb875c Michael Hanselmann
  def ExpandNames(self):
4700 80cb875c Michael Hanselmann
    self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name)
4701 80cb875c Michael Hanselmann
    if self.op.node_name is None:
4702 80cb875c Michael Hanselmann
      raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name)
4703 80cb875c Michael Hanselmann
4704 80cb875c Michael Hanselmann
    self.needed_locks = {
4705 80cb875c Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
4706 80cb875c Michael Hanselmann
      }
4707 80cb875c Michael Hanselmann
4708 80cb875c Michael Hanselmann
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4709 80cb875c Michael Hanselmann
4710 80cb875c Michael Hanselmann
    # Create tasklets for migrating all instances on this node
4711 80cb875c Michael Hanselmann
    names = []
4712 80cb875c Michael Hanselmann
    tasklets = []
4713 80cb875c Michael Hanselmann
4714 80cb875c Michael Hanselmann
    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
4715 80cb875c Michael Hanselmann
      logging.debug("Migrating instance %s", inst.name)
4716 80cb875c Michael Hanselmann
      names.append(inst.name)
4717 80cb875c Michael Hanselmann
4718 80cb875c Michael Hanselmann
      tasklets.append(TLMigrateInstance(self, inst.name, self.op.live, False))
4719 80cb875c Michael Hanselmann
4720 80cb875c Michael Hanselmann
    self.tasklets = tasklets
4721 80cb875c Michael Hanselmann
4722 80cb875c Michael Hanselmann
    # Declare instance locks
4723 80cb875c Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = names
4724 80cb875c Michael Hanselmann
4725 80cb875c Michael Hanselmann
  def DeclareLocks(self, level):
4726 80cb875c Michael Hanselmann
    if level == locking.LEVEL_NODE:
4727 80cb875c Michael Hanselmann
      self._LockInstancesNodes()
4728 80cb875c Michael Hanselmann
4729 80cb875c Michael Hanselmann
  def BuildHooksEnv(self):
4730 80cb875c Michael Hanselmann
    """Build hooks env.
4731 80cb875c Michael Hanselmann

4732 80cb875c Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
4733 80cb875c Michael Hanselmann

4734 80cb875c Michael Hanselmann
    """
4735 80cb875c Michael Hanselmann
    env = {
4736 80cb875c Michael Hanselmann
      "NODE_NAME": self.op.node_name,
4737 80cb875c Michael Hanselmann
      }
4738 80cb875c Michael Hanselmann
4739 80cb875c Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
4740 80cb875c Michael Hanselmann
4741 80cb875c Michael Hanselmann
    return (env, nl, nl)
4742 80cb875c Michael Hanselmann
4743 80cb875c Michael Hanselmann
4744 3e06e001 Michael Hanselmann
class TLMigrateInstance(Tasklet):
4745 3e06e001 Michael Hanselmann
  def __init__(self, lu, instance_name, live, cleanup):
4746 3e06e001 Michael Hanselmann
    """Initializes this class.
4747 3e06e001 Michael Hanselmann

4748 3e06e001 Michael Hanselmann
    """
4749 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
4750 464243a7 Michael Hanselmann
4751 3e06e001 Michael Hanselmann
    # Parameters
4752 3e06e001 Michael Hanselmann
    self.instance_name = instance_name
4753 3e06e001 Michael Hanselmann
    self.live = live
4754 3e06e001 Michael Hanselmann
    self.cleanup = cleanup
4755 3e06e001 Michael Hanselmann
4756 53c776b5 Iustin Pop
  def CheckPrereq(self):
4757 53c776b5 Iustin Pop
    """Check prerequisites.
4758 53c776b5 Iustin Pop

4759 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
4760 53c776b5 Iustin Pop

4761 53c776b5 Iustin Pop
    """
4762 53c776b5 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
4763 3e06e001 Michael Hanselmann
      self.cfg.ExpandInstanceName(self.instance_name))
4764 53c776b5 Iustin Pop
    if instance is None:
4765 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
4766 3e06e001 Michael Hanselmann
                                 self.instance_name)
4767 53c776b5 Iustin Pop
4768 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
4769 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
4770 53c776b5 Iustin Pop
                                 " drbd8, cannot migrate.")
4771 53c776b5 Iustin Pop
4772 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
4773 53c776b5 Iustin Pop
    if not secondary_nodes:
4774 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
4775 733a2b6a Iustin Pop
                                      " drbd8 disk template")
4776 53c776b5 Iustin Pop
4777 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
4778 53c776b5 Iustin Pop
4779 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
4780 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
4781 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
4782 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
4783 53c776b5 Iustin Pop
                         instance.hypervisor)
4784 53c776b5 Iustin Pop
4785 53c776b5 Iustin Pop
    # check bridge existence
4786 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
4787 53c776b5 Iustin Pop
4788 3e06e001 Michael Hanselmann
    if not self.cleanup:
4789 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
4790 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
4791 53c776b5 Iustin Pop
                                                 instance)
4792 4c4e4e1e Iustin Pop
      result.Raise("Can't migrate, please use failover", prereq=True)
4793 53c776b5 Iustin Pop
4794 53c776b5 Iustin Pop
    self.instance = instance
4795 53c776b5 Iustin Pop
4796 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
4797 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
4798 53c776b5 Iustin Pop

4799 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
4800 53c776b5 Iustin Pop

4801 53c776b5 Iustin Pop
    """
4802 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
4803 53c776b5 Iustin Pop
    all_done = False
4804 53c776b5 Iustin Pop
    while not all_done:
4805 53c776b5 Iustin Pop
      all_done = True
4806 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
4807 53c776b5 Iustin Pop
                                            self.nodes_ip,
4808 53c776b5 Iustin Pop
                                            self.instance.disks)
4809 53c776b5 Iustin Pop
      min_percent = 100
4810 53c776b5 Iustin Pop
      for node, nres in result.items():
4811 4c4e4e1e Iustin Pop
        nres.Raise("Cannot resync disks on node %s" % node)
4812 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
4813 53c776b5 Iustin Pop
        all_done = all_done and node_done
4814 53c776b5 Iustin Pop
        if node_percent is not None:
4815 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
4816 53c776b5 Iustin Pop
      if not all_done:
4817 53c776b5 Iustin Pop
        if min_percent < 100:
4818 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
4819 53c776b5 Iustin Pop
        time.sleep(2)
4820 53c776b5 Iustin Pop
4821 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
4822 53c776b5 Iustin Pop
    """Demote a node to secondary.
4823 53c776b5 Iustin Pop

4824 53c776b5 Iustin Pop
    """
4825 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
4826 53c776b5 Iustin Pop
4827 53c776b5 Iustin Pop
    for dev in self.instance.disks:
4828 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
4829 53c776b5 Iustin Pop
4830 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
4831 53c776b5 Iustin Pop
                                          self.instance.disks)
4832 4c4e4e1e Iustin Pop
    result.Raise("Cannot change disk to secondary on node %s" % node)
4833 53c776b5 Iustin Pop
4834 53c776b5 Iustin Pop
  def _GoStandalone(self):
4835 53c776b5 Iustin Pop
    """Disconnect from the network.
4836 53c776b5 Iustin Pop

4837 53c776b5 Iustin Pop
    """
4838 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
4839 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
4840 53c776b5 Iustin Pop
                                               self.instance.disks)
4841 53c776b5 Iustin Pop
    for node, nres in result.items():
4842 4c4e4e1e Iustin Pop
      nres.Raise("Cannot disconnect disks node %s" % node)
4843 53c776b5 Iustin Pop
4844 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
4845 53c776b5 Iustin Pop
    """Reconnect to the network.
4846 53c776b5 Iustin Pop

4847 53c776b5 Iustin Pop
    """
4848 53c776b5 Iustin Pop
    if multimaster:
4849 53c776b5 Iustin Pop
      msg = "dual-master"
4850 53c776b5 Iustin Pop
    else:
4851 53c776b5 Iustin Pop
      msg = "single-master"
4852 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
4853 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
4854 53c776b5 Iustin Pop
                                           self.instance.disks,
4855 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
4856 53c776b5 Iustin Pop
    for node, nres in result.items():
4857 4c4e4e1e Iustin Pop
      nres.Raise("Cannot change disks config on node %s" % node)
4858 53c776b5 Iustin Pop
4859 53c776b5 Iustin Pop
  def _ExecCleanup(self):
4860 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
4861 53c776b5 Iustin Pop

4862 53c776b5 Iustin Pop
    The cleanup is done by:
4863 53c776b5 Iustin Pop
      - check that the instance is running only on one node
4864 53c776b5 Iustin Pop
        (and update the config if needed)
4865 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
4866 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
4867 53c776b5 Iustin Pop
      - disconnect from the network
4868 53c776b5 Iustin Pop
      - change disks into single-master mode
4869 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
4870 53c776b5 Iustin Pop

4871 53c776b5 Iustin Pop
    """
4872 53c776b5 Iustin Pop
    instance = self.instance
4873 53c776b5 Iustin Pop
    target_node = self.target_node
4874 53c776b5 Iustin Pop
    source_node = self.source_node
4875 53c776b5 Iustin Pop
4876 53c776b5 Iustin Pop
    # check running on only one node
4877 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
4878 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
4879 53c776b5 Iustin Pop
                     " a bad state)")
4880 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
4881 53c776b5 Iustin Pop
    for node, result in ins_l.items():
4882 4c4e4e1e Iustin Pop
      result.Raise("Can't contact node %s" % node)
4883 53c776b5 Iustin Pop
4884 aca13712 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].payload
4885 aca13712 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].payload
4886 53c776b5 Iustin Pop
4887 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
4888 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
4889 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
4890 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
4891 53c776b5 Iustin Pop
                               " and restart this operation.")
4892 53c776b5 Iustin Pop
4893 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
4894 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
4895 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
4896 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
4897 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
4898 53c776b5 Iustin Pop
4899 53c776b5 Iustin Pop
    if runningon_target:
4900 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
4901 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
4902 53c776b5 Iustin Pop
                       " updating config" % target_node)
4903 53c776b5 Iustin Pop
      instance.primary_node = target_node
4904 53c776b5 Iustin Pop
      self.cfg.Update(instance)
4905 53c776b5 Iustin Pop
      demoted_node = source_node
4906 53c776b5 Iustin Pop
    else:
4907 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
4908 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
4909 53c776b5 Iustin Pop
      demoted_node = target_node
4910 53c776b5 Iustin Pop
4911 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
4912 53c776b5 Iustin Pop
    try:
4913 53c776b5 Iustin Pop
      self._WaitUntilSync()
4914 53c776b5 Iustin Pop
    except errors.OpExecError:
4915 53c776b5 Iustin Pop
      # we ignore errors here, since if the device is standalone, it
4916 53c776b5 Iustin Pop
      # won't be able to sync
4917 53c776b5 Iustin Pop
      pass
4918 53c776b5 Iustin Pop
    self._GoStandalone()
4919 53c776b5 Iustin Pop
    self._GoReconnect(False)
4920 53c776b5 Iustin Pop
    self._WaitUntilSync()
4921 53c776b5 Iustin Pop
4922 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4923 53c776b5 Iustin Pop
4924 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
4925 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
4926 6906a9d8 Guido Trotter

4927 6906a9d8 Guido Trotter
    """
4928 6906a9d8 Guido Trotter
    target_node = self.target_node
4929 6906a9d8 Guido Trotter
    try:
4930 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
4931 6906a9d8 Guido Trotter
      self._GoStandalone()
4932 6906a9d8 Guido Trotter
      self._GoReconnect(False)
4933 6906a9d8 Guido Trotter
      self._WaitUntilSync()
4934 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
4935 3e06e001 Michael Hanselmann
      self.lu.LogWarning("Migration failed and I can't reconnect the"
4936 3e06e001 Michael Hanselmann
                         " drives: error '%s'\n"
4937 3e06e001 Michael Hanselmann
                         "Please look and recover the instance status" %
4938 3e06e001 Michael Hanselmann
                         str(err))
4939 6906a9d8 Guido Trotter
4940 6906a9d8 Guido Trotter
  def _AbortMigration(self):
4941 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
4942 6906a9d8 Guido Trotter

4943 6906a9d8 Guido Trotter
    """
4944 6906a9d8 Guido Trotter
    instance = self.instance
4945 6906a9d8 Guido Trotter
    target_node = self.target_node
4946 6906a9d8 Guido Trotter
    migration_info = self.migration_info
4947 6906a9d8 Guido Trotter
4948 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
4949 6906a9d8 Guido Trotter
                                                    instance,
4950 6906a9d8 Guido Trotter
                                                    migration_info,
4951 6906a9d8 Guido Trotter
                                                    False)
4952 4c4e4e1e Iustin Pop
    abort_msg = abort_result.fail_msg
4953 6906a9d8 Guido Trotter
    if abort_msg:
4954 6906a9d8 Guido Trotter
      logging.error("Aborting migration failed on target node %s: %s" %
4955 6906a9d8 Guido Trotter
                    (target_node, abort_msg))
4956 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we still have to try to revert the
4957 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
4958 6906a9d8 Guido Trotter
4959 53c776b5 Iustin Pop
  def _ExecMigration(self):
4960 53c776b5 Iustin Pop
    """Migrate an instance.
4961 53c776b5 Iustin Pop

4962 53c776b5 Iustin Pop
    The migration is done by:
4963 53c776b5 Iustin Pop
      - change the disks into dual-master mode
4964 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
4965 53c776b5 Iustin Pop
      - migrate the instance
4966 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
4967 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
4968 53c776b5 Iustin Pop
      - change disks into single-master mode
4969 53c776b5 Iustin Pop

4970 53c776b5 Iustin Pop
    """
4971 53c776b5 Iustin Pop
    instance = self.instance
4972 53c776b5 Iustin Pop
    target_node = self.target_node
4973 53c776b5 Iustin Pop
    source_node = self.source_node
4974 53c776b5 Iustin Pop
4975 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
4976 53c776b5 Iustin Pop
    for dev in instance.disks:
4977 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
4978 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
4979 53c776b5 Iustin Pop
                                 " synchronized on target node,"
4980 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
4981 53c776b5 Iustin Pop
4982 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
4983 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
4984 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4985 6906a9d8 Guido Trotter
    if msg:
4986 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
4987 0959c824 Iustin Pop
                 (source_node, msg))
4988 6906a9d8 Guido Trotter
      logging.error(log_err)
4989 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
4990 6906a9d8 Guido Trotter
4991 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
4992 6906a9d8 Guido Trotter
4993 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
4994 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
4995 53c776b5 Iustin Pop
    self._GoStandalone()
4996 53c776b5 Iustin Pop
    self._GoReconnect(True)
4997 53c776b5 Iustin Pop
    self._WaitUntilSync()
4998 53c776b5 Iustin Pop
4999 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
5000 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
5001 6906a9d8 Guido Trotter
                                           instance,
5002 6906a9d8 Guido Trotter
                                           migration_info,
5003 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
5004 6906a9d8 Guido Trotter
5005 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5006 6906a9d8 Guido Trotter
    if msg:
5007 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
5008 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
5009 6906a9d8 Guido Trotter
      self._AbortMigration()
5010 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5011 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
5012 6906a9d8 Guido Trotter
                               (instance.name, msg))
5013 6906a9d8 Guido Trotter
5014 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
5015 53c776b5 Iustin Pop
    time.sleep(10)
5016 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
5017 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
5018 3e06e001 Michael Hanselmann
                                            self.live)
5019 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5020 53c776b5 Iustin Pop
    if msg:
5021 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
5022 53c776b5 Iustin Pop
                    " disk status: %s", msg)
5023 6906a9d8 Guido Trotter
      self._AbortMigration()
5024 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5025 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
5026 53c776b5 Iustin Pop
                               (instance.name, msg))
5027 53c776b5 Iustin Pop
    time.sleep(10)
5028 53c776b5 Iustin Pop
5029 53c776b5 Iustin Pop
    instance.primary_node = target_node
5030 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
5031 53c776b5 Iustin Pop
    self.cfg.Update(instance)
5032 53c776b5 Iustin Pop
5033 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
5034 6906a9d8 Guido Trotter
                                              instance,
5035 6906a9d8 Guido Trotter
                                              migration_info,
5036 6906a9d8 Guido Trotter
                                              True)
5037 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5038 6906a9d8 Guido Trotter
    if msg:
5039 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
5040 6906a9d8 Guido Trotter
                    " %s" % msg)
5041 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
5042 6906a9d8 Guido Trotter
                               msg)
5043 6906a9d8 Guido Trotter
5044 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
5045 53c776b5 Iustin Pop
    self._WaitUntilSync()
5046 53c776b5 Iustin Pop
    self._GoStandalone()
5047 53c776b5 Iustin Pop
    self._GoReconnect(False)
5048 53c776b5 Iustin Pop
    self._WaitUntilSync()
5049 53c776b5 Iustin Pop
5050 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5051 53c776b5 Iustin Pop
5052 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
5053 53c776b5 Iustin Pop
    """Perform the migration.
5054 53c776b5 Iustin Pop

5055 53c776b5 Iustin Pop
    """
5056 80cb875c Michael Hanselmann
    feedback_fn("Migrating instance %s" % self.instance.name)
5057 80cb875c Michael Hanselmann
5058 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
5059 53c776b5 Iustin Pop
5060 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
5061 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
5062 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
5063 53c776b5 Iustin Pop
    self.nodes_ip = {
5064 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
5065 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
5066 53c776b5 Iustin Pop
      }
5067 3e06e001 Michael Hanselmann
5068 3e06e001 Michael Hanselmann
    if self.cleanup:
5069 53c776b5 Iustin Pop
      return self._ExecCleanup()
5070 53c776b5 Iustin Pop
    else:
5071 53c776b5 Iustin Pop
      return self._ExecMigration()
5072 53c776b5 Iustin Pop
5073 53c776b5 Iustin Pop
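# Editorial sketch (not part of the upstream module): the tasklet above is
# driven purely through its CheckPrereq/Exec interface, so a hypothetical
# single-instance logical unit could reuse it exactly the way LUMigrateNode
# does above, e.g. in its ExpandNames:
#
#   self.tasklets = [TLMigrateInstance(self, self.op.instance_name,
#                                      self.op.live, False)]
#
# Passing cleanup=True instead makes Exec() take the _ExecCleanup path, which
# only repairs the DRBD/configuration state left behind by a failed migration.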
5074 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
5075 428958aa Iustin Pop
                    info, force_open):
5076 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
5077 a8083063 Iustin Pop

5078 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
5079 a8083063 Iustin Pop
  all its children.
5080 a8083063 Iustin Pop

5081 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
5082 a8083063 Iustin Pop

5083 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
5084 428958aa Iustin Pop
  @param node: the node on which to create the device
5085 428958aa Iustin Pop
  @type instance: L{objects.Instance}
5086 428958aa Iustin Pop
  @param instance: the instance which owns the device
5087 428958aa Iustin Pop
  @type device: L{objects.Disk}
5088 428958aa Iustin Pop
  @param device: the device to create
5089 428958aa Iustin Pop
  @type force_create: boolean
5090 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
5091 428958aa Iustin Pop
      will be change to True whenever we find a device which has
5092 428958aa Iustin Pop
      CreateOnSecondary() attribute
5093 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5094 428958aa Iustin Pop
      (this will be represented as a LVM tag)
5095 428958aa Iustin Pop
  @type force_open: boolean
5096 428958aa Iustin Pop
  @param force_open: this parameter will be passed to the
5097 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5098 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
5099 428958aa Iustin Pop
      the child assembly and the device's own Open() execution
5100 428958aa Iustin Pop

5101 a8083063 Iustin Pop
  """
5102 a8083063 Iustin Pop
  if device.CreateOnSecondary():
5103 428958aa Iustin Pop
    force_create = True
5104 796cab27 Iustin Pop
5105 a8083063 Iustin Pop
  if device.children:
5106 a8083063 Iustin Pop
    for child in device.children:
5107 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
5108 428958aa Iustin Pop
                      info, force_open)
5109 a8083063 Iustin Pop
5110 428958aa Iustin Pop
  if not force_create:
5111 796cab27 Iustin Pop
    return
5112 796cab27 Iustin Pop
5113 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
5114 de12473a Iustin Pop
5115 de12473a Iustin Pop
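# Editorial note (a sketch, not upstream code): force_create is "sticky" down
# the recursion above -- once a device reports CreateOnSecondary(), it and
# every device below it is materialized via _CreateSingleBlockDev(); subtrees
# for which the flag stays False are only descended into and then skipped.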
5116 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
5117 de12473a Iustin Pop
  """Create a single block device on a given node.
5118 de12473a Iustin Pop

5119 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
5120 de12473a Iustin Pop
  created in advance.
5121 de12473a Iustin Pop

5122 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
5123 de12473a Iustin Pop
  @param node: the node on which to create the device
5124 de12473a Iustin Pop
  @type instance: L{objects.Instance}
5125 de12473a Iustin Pop
  @param instance: the instance which owns the device
5126 de12473a Iustin Pop
  @type device: L{objects.Disk}
5127 de12473a Iustin Pop
  @param device: the device to create
5128 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5129 de12473a Iustin Pop
      (this will be represented as a LVM tag)
5130 de12473a Iustin Pop
  @type force_open: boolean
5131 de12473a Iustin Pop
  @param force_open: this parameter will be passed to the
5132 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5133 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
5134 de12473a Iustin Pop
      the child assembly and the device's own Open() execution
5135 de12473a Iustin Pop

5136 de12473a Iustin Pop
  """
5137 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
5138 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
5139 428958aa Iustin Pop
                                       instance.name, force_open, info)
5140 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
5141 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
5142 a8083063 Iustin Pop
  if device.physical_id is None:
5143 0959c824 Iustin Pop
    device.physical_id = result.payload
5144 a8083063 Iustin Pop
5145 a8083063 Iustin Pop
5146 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
5147 923b1523 Iustin Pop
  """Generate a suitable LV name.
5148 923b1523 Iustin Pop

5149 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
5150 923b1523 Iustin Pop

5151 923b1523 Iustin Pop
  """
5152 923b1523 Iustin Pop
  results = []
5153 923b1523 Iustin Pop
  for val in exts:
5154 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
5155 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
5156 923b1523 Iustin Pop
  return results
5157 923b1523 Iustin Pop
5158 923b1523 Iustin Pop
5159 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
5160 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
5161 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
5162 a1f445d3 Iustin Pop

5163 a1f445d3 Iustin Pop
  """
5164 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
5165 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5166 b9bddb6b Iustin Pop
  shared_secret = lu.cfg.GenerateDRBDSecret()
5167 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5168 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
5169 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5170 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
5171 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
5172 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
5173 f9518d38 Iustin Pop
                                      p_minor, s_minor,
5174 f9518d38 Iustin Pop
                                      shared_secret),
5175 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
5176 a1f445d3 Iustin Pop
                          iv_name=iv_name)
5177 a1f445d3 Iustin Pop
  return drbd_dev
5178 a1f445d3 Iustin Pop
5179 7c0d6283 Michael Hanselmann
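# Editorial illustration (not in the original file): for a single 10240 MB
# disk the helper above builds a device tree along these lines, with the
# concrete names, port and minors coming from the cluster configuration:
#
#   LD_DRBD8, size=10240,
#             logical_id=(primary, secondary, port, p_minor, s_minor, secret)
#     +- LD_LV "..._data", size=10240
#     +- LD_LV "..._meta", size=128
#
# The fixed 128 MB metadata volume is the amount _ComputeDiskSize adds per
# disk when sizing DRBD8-based instances.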
5180 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
5181 a8083063 Iustin Pop
                          instance_name, primary_node,
5182 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
5183 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
5184 e2a65344 Iustin Pop
                          base_index):
5185 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
5186 a8083063 Iustin Pop

5187 a8083063 Iustin Pop
  """
5188 a8083063 Iustin Pop
  #TODO: compute space requirements
5189 a8083063 Iustin Pop
5190 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5191 08db7c5c Iustin Pop
  disk_count = len(disk_info)
5192 08db7c5c Iustin Pop
  disks = []
5193 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
5194 08db7c5c Iustin Pop
    pass
5195 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
5196 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
5197 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5198 923b1523 Iustin Pop
5199 fb4b324b Guido Trotter
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5200 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
5201 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5202 e2a65344 Iustin Pop
      disk_index = idx + base_index
5203 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
5204 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
5205 6ec66eae Iustin Pop
                              iv_name="disk/%d" % disk_index,
5206 6ec66eae Iustin Pop
                              mode=disk["mode"])
5207 08db7c5c Iustin Pop
      disks.append(disk_dev)
5208 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
5209 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
5210 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5211 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
5212 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
5213 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
5214 08db7c5c Iustin Pop
5215 e6c1ff2f Iustin Pop
    names = []
5216 fb4b324b Guido Trotter
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5217 e6c1ff2f Iustin Pop
                                               for i in range(disk_count)]):
5218 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_data")
5219 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_meta")
5220 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5221 112050d9 Iustin Pop
      disk_index = idx + base_index
5222 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
5223 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
5224 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
5225 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
5226 6ec66eae Iustin Pop
      disk_dev.mode = disk["mode"]
5227 08db7c5c Iustin Pop
      disks.append(disk_dev)
5228 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
5229 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
5230 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
5231 0f1a06e3 Manuel Franceschini
5232 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5233 112050d9 Iustin Pop
      disk_index = idx + base_index
5234 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
5235 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
5236 08db7c5c Iustin Pop
                              logical_id=(file_driver,
5237 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
5238 43e99cff Guido Trotter
                                                         disk_index)),
5239 6ec66eae Iustin Pop
                              mode=disk["mode"])
5240 08db7c5c Iustin Pop
      disks.append(disk_dev)
5241 a8083063 Iustin Pop
  else:
5242 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
5243 a8083063 Iustin Pop
  return disks
5244 a8083063 Iustin Pop
5245 a8083063 Iustin Pop
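# Editorial example (assumptions only, not upstream code): with
# template_name=constants.DT_PLAIN, base_index=2 and two disk_info entries
# such as {"size": 1024, "mode": "rw"}, the function above asks for unique
# names ".disk2" and ".disk3" and returns LV-backed Disk objects with
# iv_names "disk/2" and "disk/3".  The same base_index offset is applied by
# the DRBD8 and file-based branches, presumably so that disk numbering stays
# stable when new disks are appended to an existing instance.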
5246 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
5247 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
5248 3ecf6786 Iustin Pop

5249 3ecf6786 Iustin Pop
  """
5250 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
5251 a0c3fea1 Michael Hanselmann
5252 a0c3fea1 Michael Hanselmann
5253 621b7678 Iustin Pop
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
5254 a8083063 Iustin Pop
  """Create all disks for an instance.
5255 a8083063 Iustin Pop

5256 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
5257 a8083063 Iustin Pop

5258 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5259 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5260 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5261 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
5262 bd315bfa Iustin Pop
  @type to_skip: list
5263 bd315bfa Iustin Pop
  @param to_skip: list of indices to skip
5264 621b7678 Iustin Pop
  @type target_node: string
5265 621b7678 Iustin Pop
  @param target_node: if passed, overrides the target node for creation
5266 e4376078 Iustin Pop
  @rtype: None
5267 e4376078 Iustin Pop
  @return: None; failures during disk creation are reported by raising an
      exception rather than via a return value
5268 a8083063 Iustin Pop

5269 a8083063 Iustin Pop
  """
5270 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
5271 621b7678 Iustin Pop
  if target_node is None:
5272 621b7678 Iustin Pop
    pnode = instance.primary_node
5273 621b7678 Iustin Pop
    all_nodes = instance.all_nodes
5274 621b7678 Iustin Pop
  else:
5275 621b7678 Iustin Pop
    pnode = target_node
5276 621b7678 Iustin Pop
    all_nodes = [pnode]
5277 a0c3fea1 Michael Hanselmann
5278 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5279 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5280 428958aa Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
5281 0f1a06e3 Manuel Franceschini
5282 4c4e4e1e Iustin Pop
    result.Raise("Failed to create directory '%s' on"
5283 9b4127eb Guido Trotter
                 " node %s" % (file_storage_dir, pnode))
5284 0f1a06e3 Manuel Franceschini
5285 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
5286 24991749 Iustin Pop
  # LUSetInstanceParams
5287 bd315bfa Iustin Pop
  for idx, device in enumerate(instance.disks):
5288 bd315bfa Iustin Pop
    if to_skip and idx in to_skip:
5289 bd315bfa Iustin Pop
      continue
5290 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
5291 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
5292 a8083063 Iustin Pop
    #HARDCODE
5293 621b7678 Iustin Pop
    for node in all_nodes:
5294 428958aa Iustin Pop
      f_create = node == pnode
5295 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
5296 a8083063 Iustin Pop
5297 a8083063 Iustin Pop
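# Editorial usage note (a sketch, not upstream code): without target_node the
# helper above creates every disk on the instance's primary and secondary
# nodes; passing target_node restricts creation to that single node, e.g.
#
#   _CreateDisks(self, instance)                            # normal creation
#   _CreateDisks(self, instance, target_node=other_node)    # single-node copy
#
# where other_node is just an illustrative name.  to_skip takes a list of
# disk indices that should be left untouched.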
5298 621b7678 Iustin Pop
def _RemoveDisks(lu, instance, target_node=None):
5299 a8083063 Iustin Pop
  """Remove all disks for an instance.
5300 a8083063 Iustin Pop

5301 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
5302 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
5303 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
5304 a8083063 Iustin Pop
  with `_CreateDisks()`).
5305 a8083063 Iustin Pop

5306 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5307 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5308 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5309 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
5310 621b7678 Iustin Pop
  @type target_node: string
5311 621b7678 Iustin Pop
  @param target_node: used to override the node on which to remove the disks
5312 e4376078 Iustin Pop
  @rtype: boolean
5313 e4376078 Iustin Pop
  @return: the success of the removal
5314 a8083063 Iustin Pop

5315 a8083063 Iustin Pop
  """
5316 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
5317 a8083063 Iustin Pop
5318 e1bc0878 Iustin Pop
  all_result = True
5319 a8083063 Iustin Pop
  for device in instance.disks:
5320 621b7678 Iustin Pop
    if target_node:
5321 621b7678 Iustin Pop
      edata = [(target_node, device)]
5322 621b7678 Iustin Pop
    else:
5323 621b7678 Iustin Pop
      edata = device.ComputeNodeTree(instance.primary_node)
5324 621b7678 Iustin Pop
    for node, disk in edata:
5325 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
5326 4c4e4e1e Iustin Pop
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
5327 e1bc0878 Iustin Pop
      if msg:
5328 e1bc0878 Iustin Pop
        lu.LogWarning("Could not remove block device %s on node %s,"
5329 e1bc0878 Iustin Pop
                      " continuing anyway: %s", device.iv_name, node, msg)
5330 e1bc0878 Iustin Pop
        all_result = False
5331 0f1a06e3 Manuel Franceschini
5332 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5333 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5334 dfc2a24c Guido Trotter
    if target_node:
5335 dfc2a24c Guido Trotter
      tgt = target_node
5336 621b7678 Iustin Pop
    else:
5337 dfc2a24c Guido Trotter
      tgt = instance.primary_node
5338 621b7678 Iustin Pop
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
5339 621b7678 Iustin Pop
    if result.fail_msg:
5340 b2b8bcce Iustin Pop
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
5341 621b7678 Iustin Pop
                    file_storage_dir, tgt, result.fail_msg)
5342 e1bc0878 Iustin Pop
      all_result = False
5343 0f1a06e3 Manuel Franceschini
5344 e1bc0878 Iustin Pop
  return all_result
5345 a8083063 Iustin Pop
5346 a8083063 Iustin Pop
5347 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
5348 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
5349 e2fe6369 Iustin Pop

5350 e2fe6369 Iustin Pop
  """
5351 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk and swap space
5352 e2fe6369 Iustin Pop
  req_size_dict = {
5353 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
5354 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
5355 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
5356 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
5357 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
5358 e2fe6369 Iustin Pop
  }
5359 e2fe6369 Iustin Pop
5360 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
5361 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
5362 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
5363 e2fe6369 Iustin Pop
5364 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
5365 e2fe6369 Iustin Pop
5366 e2fe6369 Iustin Pop
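# Editorial worked example (not part of the original file): for two disks of
# 10240 MB each, the space required from the volume group by the function
# above is
#
#   DT_PLAIN:    10240 + 10240      = 20480 MB
#   DT_DRBD8:    (10240 + 128) * 2  = 20736 MB  (128 MB DRBD metadata per disk)
#   DT_DISKLESS / DT_FILE: None (no volume group space is used)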
5367 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
5368 74409b12 Iustin Pop
  """Hypervisor parameter validation.
5369 74409b12 Iustin Pop

5370 74409b12 Iustin Pop
  This function abstracts the hypervisor parameter validation to be
5371 74409b12 Iustin Pop
  used in both instance create and instance modify.
5372 74409b12 Iustin Pop

5373 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
5374 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
5375 74409b12 Iustin Pop
  @type nodenames: list
5376 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
5377 74409b12 Iustin Pop
  @type hvname: string
5378 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
5379 74409b12 Iustin Pop
  @type hvparams: dict
5380 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
5381 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
5382 74409b12 Iustin Pop

5383 74409b12 Iustin Pop
  """
5384 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
5385 74409b12 Iustin Pop
                                                  hvname,
5386 74409b12 Iustin Pop
                                                  hvparams)
5387 74409b12 Iustin Pop
  for node in nodenames:
5388 781de953 Iustin Pop
    info = hvinfo[node]
5389 68c6f21c Iustin Pop
    if info.offline:
5390 68c6f21c Iustin Pop
      continue
5391 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
5392 74409b12 Iustin Pop
5393 74409b12 Iustin Pop
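# Editorial usage sketch (assumption: the caller has already computed the
# filled hypervisor parameter dict, here called filled_hvp, and the relevant
# node names):
#
#   _CheckHVParams(self, nodenames, self.op.hypervisor, filled_hvp)
#
# Nodes marked offline are skipped, so only reachable nodes that report a
# validation failure (or cannot be contacted at all) make the check raise.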
5394 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
5395 a8083063 Iustin Pop
  """Create an instance.
5396 a8083063 Iustin Pop

5397 a8083063 Iustin Pop
  """
5398 a8083063 Iustin Pop
  HPATH = "instance-add"
5399 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5400 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
5401 08db7c5c Iustin Pop
              "mode", "start",
5402 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
5403 338e51e8 Iustin Pop
              "hvparams", "beparams"]
5404 7baf741d Guido Trotter
  REQ_BGL = False
5405 7baf741d Guido Trotter
5406 7baf741d Guido Trotter
  def _ExpandNode(self, node):
5407 7baf741d Guido Trotter
    """Expands and checks one node name.
5408 7baf741d Guido Trotter

5409 7baf741d Guido Trotter
    """
5410 7baf741d Guido Trotter
    node_full = self.cfg.ExpandNodeName(node)
5411 7baf741d Guido Trotter
    if node_full is None:
5412 7baf741d Guido Trotter
      raise errors.OpPrereqError("Unknown node %s" % node)
5413 7baf741d Guido Trotter
    return node_full
5414 7baf741d Guido Trotter
5415 7baf741d Guido Trotter
  def ExpandNames(self):
5416 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
5417 7baf741d Guido Trotter

5418 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
5419 7baf741d Guido Trotter

5420 7baf741d Guido Trotter
    """
5421 7baf741d Guido Trotter
    self.needed_locks = {}
5422 7baf741d Guido Trotter
5423 7baf741d Guido Trotter
    # set optional parameters to none if they don't exist
5424 6785674e Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
5425 7baf741d Guido Trotter
      if not hasattr(self.op, attr):
5426 7baf741d Guido Trotter
        setattr(self.op, attr, None)
5427 7baf741d Guido Trotter
5428 4b2f38dd Iustin Pop
    # cheap checks, mostly valid constants given
5429 4b2f38dd Iustin Pop
5430 7baf741d Guido Trotter
    # verify creation mode
5431 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
5432 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
5433 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
5434 7baf741d Guido Trotter
                                 self.op.mode)
5435 4b2f38dd Iustin Pop
5436 7baf741d Guido Trotter
    # disk template and mirror node verification
5437 7baf741d Guido Trotter
    if self.op.disk_template not in constants.DISK_TEMPLATES:
5438 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid disk template name")
5439 7baf741d Guido Trotter
5440 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
5441 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
5442 4b2f38dd Iustin Pop
5443 8705eb96 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
5444 8705eb96 Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
5445 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
5446 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
5447 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
5448 4b2f38dd Iustin Pop
                                  ",".join(enabled_hvs)))
5449 4b2f38dd Iustin Pop
5450 6785674e Iustin Pop
    # check hypervisor parameter syntax (locally)
5451 a5728081 Guido Trotter
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
5452 abe609b2 Guido Trotter
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
5453 8705eb96 Iustin Pop
                                  self.op.hvparams)
5454 6785674e Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
5455 8705eb96 Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
5456 67fc3042 Iustin Pop
    self.hv_full = filled_hvp
5457 6785674e Iustin Pop
5458 338e51e8 Iustin Pop
    # fill and remember the beparams dict
5459 a5728081 Guido Trotter
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
5460 4ef7f423 Guido Trotter
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
5461 338e51e8 Iustin Pop
                                    self.op.beparams)
5462 338e51e8 Iustin Pop
5463 7baf741d Guido Trotter
    #### instance parameters check
5464 7baf741d Guido Trotter
5465 7baf741d Guido Trotter
    # instance name verification
5466 7baf741d Guido Trotter
    hostname1 = utils.HostInfo(self.op.instance_name)
5467 7baf741d Guido Trotter
    self.op.instance_name = instance_name = hostname1.name
5468 7baf741d Guido Trotter
5469 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
5470 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
5471 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
5472 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5473 7baf741d Guido Trotter
                                 instance_name)
5474 7baf741d Guido Trotter
5475 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
5476 7baf741d Guido Trotter
5477 08db7c5c Iustin Pop
    # NIC buildup
5478 08db7c5c Iustin Pop
    self.nics = []
5479 9dce4771 Guido Trotter
    for idx, nic in enumerate(self.op.nics):
5480 9dce4771 Guido Trotter
      nic_mode_req = nic.get("mode", None)
5481 9dce4771 Guido Trotter
      nic_mode = nic_mode_req
5482 9dce4771 Guido Trotter
      if nic_mode is None:
5483 9dce4771 Guido Trotter
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
5484 9dce4771 Guido Trotter
5485 9dce4771 Guido Trotter
      # in routed mode, for the first nic, the default ip is 'auto'
5486 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
5487 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_AUTO
5488 9dce4771 Guido Trotter
      else:
5489 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_NONE
5490 9dce4771 Guido Trotter
5491 08db7c5c Iustin Pop
      # ip validity checks
5492 9dce4771 Guido Trotter
      ip = nic.get("ip", default_ip_mode)
5493 9dce4771 Guido Trotter
      if ip is None or ip.lower() == constants.VALUE_NONE:
5494 08db7c5c Iustin Pop
        nic_ip = None
5495 08db7c5c Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
5496 08db7c5c Iustin Pop
        nic_ip = hostname1.ip
5497 08db7c5c Iustin Pop
      else:
5498 08db7c5c Iustin Pop
        if not utils.IsValidIP(ip):
5499 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
5500 08db7c5c Iustin Pop
                                     " like a valid IP" % ip)
5501 08db7c5c Iustin Pop
        nic_ip = ip
5502 08db7c5c Iustin Pop
5503 9dce4771 Guido Trotter
      # TODO: check the ip for uniqueness !!
5504 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
5505 9dce4771 Guido Trotter
        raise errors.OpPrereqError("Routed nic mode requires an ip address")
5506 9dce4771 Guido Trotter
5507 08db7c5c Iustin Pop
      # MAC address verification
5508 08db7c5c Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
5509 08db7c5c Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5510 08db7c5c Iustin Pop
        if not utils.IsValidMac(mac.lower()):
5511 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
5512 08db7c5c Iustin Pop
                                     mac)
5513 87e43988 Iustin Pop
        else:
5514 87e43988 Iustin Pop
          # or validate/reserve the current one
5515 87e43988 Iustin Pop
          if self.cfg.IsMacInUse(mac):
5516 87e43988 Iustin Pop
            raise errors.OpPrereqError("MAC address %s already in use"
5517 87e43988 Iustin Pop
                                       " in cluster" % mac)
5518 87e43988 Iustin Pop
5519 08db7c5c Iustin Pop
      # bridge verification
5520 9939547b Iustin Pop
      bridge = nic.get("bridge", None)
5521 9dce4771 Guido Trotter
      link = nic.get("link", None)
5522 9dce4771 Guido Trotter
      if bridge and link:
5523 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
5524 29921401 Iustin Pop
                                   " at the same time")
5525 9dce4771 Guido Trotter
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
5526 9dce4771 Guido Trotter
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic")
5527 9dce4771 Guido Trotter
      elif bridge:
5528 9dce4771 Guido Trotter
        link = bridge
5529 9dce4771 Guido Trotter
5530 9dce4771 Guido Trotter
      nicparams = {}
5531 9dce4771 Guido Trotter
      if nic_mode_req:
5532 9dce4771 Guido Trotter
        nicparams[constants.NIC_MODE] = nic_mode_req
5533 9dce4771 Guido Trotter
      if link:
5534 9dce4771 Guido Trotter
        nicparams[constants.NIC_LINK] = link
5535 9dce4771 Guido Trotter
5536 9dce4771 Guido Trotter
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
5537 9dce4771 Guido Trotter
                                      nicparams)
5538 9dce4771 Guido Trotter
      objects.NIC.CheckParameterSyntax(check_params)
5539 9dce4771 Guido Trotter
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
5540 08db7c5c Iustin Pop
5541 08db7c5c Iustin Pop
    # disk checks/pre-build
5542 08db7c5c Iustin Pop
    self.disks = []
5543 08db7c5c Iustin Pop
    for disk in self.op.disks:
5544 08db7c5c Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
5545 08db7c5c Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
5546 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
5547 08db7c5c Iustin Pop
                                   mode)
5548 08db7c5c Iustin Pop
      size = disk.get("size", None)
5549 08db7c5c Iustin Pop
      if size is None:
5550 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Missing disk size")
5551 08db7c5c Iustin Pop
      try:
5552 08db7c5c Iustin Pop
        size = int(size)
5553 08db7c5c Iustin Pop
      except ValueError:
5554 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
5555 08db7c5c Iustin Pop
      self.disks.append({"size": size, "mode": mode})
5556 08db7c5c Iustin Pop
5557 7baf741d Guido Trotter
    # used in CheckPrereq for ip ping check
5558 7baf741d Guido Trotter
    self.check_ip = hostname1.ip
5559 7baf741d Guido Trotter
5560 7baf741d Guido Trotter
    # file storage checks
5561 7baf741d Guido Trotter
    if (self.op.file_driver and
5562 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
5563 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
5564 7baf741d Guido Trotter
                                 self.op.file_driver)
5565 7baf741d Guido Trotter
5566 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
5567 7baf741d Guido Trotter
      raise errors.OpPrereqError("File storage directory path not absolute")
5568 7baf741d Guido Trotter
5569 7baf741d Guido Trotter
    ### Node/iallocator related checks
5570 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
5571 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
5572 7baf741d Guido Trotter
                                 " node must be given")
5573 7baf741d Guido Trotter
5574 7baf741d Guido Trotter
    if self.op.iallocator:
5575 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5576 7baf741d Guido Trotter
    else:
5577 7baf741d Guido Trotter
      self.op.pnode = self._ExpandNode(self.op.pnode)
5578 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
5579 7baf741d Guido Trotter
      if self.op.snode is not None:
5580 7baf741d Guido Trotter
        self.op.snode = self._ExpandNode(self.op.snode)
5581 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
5582 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
5583 7baf741d Guido Trotter
5584 7baf741d Guido Trotter
    # in case of import lock the source node too
5585 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
5586 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
5587 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
5588 7baf741d Guido Trotter
5589 b9322a9f Guido Trotter
      if src_path is None:
5590 b9322a9f Guido Trotter
        self.op.src_path = src_path = self.op.instance_name
5591 b9322a9f Guido Trotter
5592 b9322a9f Guido Trotter
      if src_node is None:
5593 b9322a9f Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5594 b9322a9f Guido Trotter
        self.op.src_node = None
5595 b9322a9f Guido Trotter
        if os.path.isabs(src_path):
5596 b9322a9f Guido Trotter
          raise errors.OpPrereqError("Importing an instance from an absolute"
5597 b9322a9f Guido Trotter
                                     " path requires a source node option.")
5598 b9322a9f Guido Trotter
      else:
5599 b9322a9f Guido Trotter
        self.op.src_node = src_node = self._ExpandNode(src_node)
5600 b9322a9f Guido Trotter
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
5601 b9322a9f Guido Trotter
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
5602 b9322a9f Guido Trotter
        if not os.path.isabs(src_path):
5603 b9322a9f Guido Trotter
          self.op.src_path = src_path = \
5604 b9322a9f Guido Trotter
            os.path.join(constants.EXPORT_DIR, src_path)
5605 7baf741d Guido Trotter
5606 f2c05717 Guido Trotter
      # On import force_variant must be True, because if we forced it at
5607 f2c05717 Guido Trotter
      # initial install, our only chance when importing it back is that it
5608 f2c05717 Guido Trotter
      # works again!
5609 f2c05717 Guido Trotter
      self.op.force_variant = True
5610 f2c05717 Guido Trotter
5611 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
5612 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
5613 7baf741d Guido Trotter
        raise errors.OpPrereqError("No guest OS specified")
5614 f2c05717 Guido Trotter
      self.op.force_variant = getattr(self.op, "force_variant", False)
5615 a8083063 Iustin Pop
5616 538475ca Iustin Pop
  def _RunAllocator(self):
5617 538475ca Iustin Pop
    """Run the allocator based on input opcode.
5618 538475ca Iustin Pop

5619 538475ca Iustin Pop
    """
5620 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
5621 923ddac0 Michael Hanselmann
    ial = IAllocator(self.cfg, self.rpc,
5622 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
5623 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
5624 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
5625 d1c2dd75 Iustin Pop
                     tags=[],
5626 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
5627 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
5628 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
5629 08db7c5c Iustin Pop
                     disks=self.disks,
5630 d1c2dd75 Iustin Pop
                     nics=nics,
5631 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
5632 29859cb7 Iustin Pop
                     )
5633 d1c2dd75 Iustin Pop
5634 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
5635 d1c2dd75 Iustin Pop
5636 d1c2dd75 Iustin Pop
    if not ial.success:
5637 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
5638 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
5639 d1c2dd75 Iustin Pop
                                                           ial.info))
5640 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
5641 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5642 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
5643 97abc79f Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
5644 1ce4bbe3 René Nussbaumer
                                  ial.required_nodes))
5645 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
5646 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
5647 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
5648 86d9d3bb Iustin Pop
                 ", ".join(ial.nodes))
5649 27579978 Iustin Pop
    if ial.required_nodes == 2:
5650 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
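    # Hypothetical illustration: for a mirrored disk template a successful
    # run might return ial.nodes == ["node1.example.com", "node2.example.com"],
    # which become self.op.pnode and self.op.snode; non-mirrored templates
    # need required_nodes == 1 and only fill self.op.pnode.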
5651 538475ca Iustin Pop
5652 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5653 a8083063 Iustin Pop
    """Build hooks env.
5654 a8083063 Iustin Pop

5655 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
5656 a8083063 Iustin Pop

5657 a8083063 Iustin Pop
    """
5658 a8083063 Iustin Pop
    env = {
5659 2c2690c9 Iustin Pop
      "ADD_MODE": self.op.mode,
5660 a8083063 Iustin Pop
      }
5661 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
5662 2c2690c9 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
5663 2c2690c9 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
5664 2c2690c9 Iustin Pop
      env["SRC_IMAGES"] = self.src_images
5665 396e1b78 Michael Hanselmann
5666 2c2690c9 Iustin Pop
    env.update(_BuildInstanceHookEnv(
5667 2c2690c9 Iustin Pop
      name=self.op.instance_name,
5668 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
5669 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
5670 4978db17 Iustin Pop
      status=self.op.start,
5671 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
5672 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
5673 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
5674 f9b10246 Guido Trotter
      nics=_NICListToTuple(self, self.nics),
5675 2c2690c9 Iustin Pop
      disk_template=self.op.disk_template,
5676 2c2690c9 Iustin Pop
      disks=[(d["size"], d["mode"]) for d in self.disks],
5677 67fc3042 Iustin Pop
      bep=self.be_full,
5678 67fc3042 Iustin Pop
      hvp=self.hv_full,
5679 3df6e710 Iustin Pop
      hypervisor_name=self.op.hypervisor,
5680 396e1b78 Michael Hanselmann
    ))
5681 a8083063 Iustin Pop
5682 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
5683 a8083063 Iustin Pop
          self.secondaries)
5684 a8083063 Iustin Pop
    return env, nl, nl
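    # Rough shape of the result (illustration; the key names are assumptions
    # taken from _BuildInstanceHookEnv, not guarantees):
    #   env == {"ADD_MODE": "create", "INSTANCE_NAME": "inst1.example.com",
    #           "INSTANCE_PRIMARY": ..., "INSTANCE_MEMORY": ..., ...}
    #   nl  == [master_node, primary_node] + secondary nodes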
5685 a8083063 Iustin Pop
5686 a8083063 Iustin Pop
5687 a8083063 Iustin Pop
  def CheckPrereq(self):
5688 a8083063 Iustin Pop
    """Check prerequisites.
5689 a8083063 Iustin Pop

5690 a8083063 Iustin Pop
    """
5691 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
5692 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
5693 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
5694 eedc99de Manuel Franceschini
                                 " instances")
5695 eedc99de Manuel Franceschini
5696 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
5697 7baf741d Guido Trotter
      src_node = self.op.src_node
5698 7baf741d Guido Trotter
      src_path = self.op.src_path
5699 a8083063 Iustin Pop
5700 c0cbdc67 Guido Trotter
      if src_node is None:
5701 1b7bfbb7 Iustin Pop
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
5702 1b7bfbb7 Iustin Pop
        exp_list = self.rpc.call_export_list(locked_nodes)
5703 c0cbdc67 Guido Trotter
        found = False
5704 c0cbdc67 Guido Trotter
        for node in exp_list:
5705 4c4e4e1e Iustin Pop
          if exp_list[node].fail_msg:
5706 1b7bfbb7 Iustin Pop
            continue
5707 1b7bfbb7 Iustin Pop
          if src_path in exp_list[node].payload:
5708 c0cbdc67 Guido Trotter
            found = True
5709 c0cbdc67 Guido Trotter
            self.op.src_node = src_node = node
5710 c0cbdc67 Guido Trotter
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
5711 c0cbdc67 Guido Trotter
                                                       src_path)
5712 c0cbdc67 Guido Trotter
            break
5713 c0cbdc67 Guido Trotter
        if not found:
5714 c0cbdc67 Guido Trotter
          raise errors.OpPrereqError("No export found for relative path %s" %
5715 c0cbdc67 Guido Trotter
                                      src_path)
5716 c0cbdc67 Guido Trotter
5717 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, src_node)
5718 781de953 Iustin Pop
      result = self.rpc.call_export_info(src_node, src_path)
5719 4c4e4e1e Iustin Pop
      result.Raise("No export or invalid export found in dir %s" % src_path)
5720 a8083063 Iustin Pop
5721 3eccac06 Iustin Pop
      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
5722 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
5723 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
5724 a8083063 Iustin Pop
5725 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
5726 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
5727 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
5728 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
5729 a8083063 Iustin Pop
5730 09acf207 Guido Trotter
      # Check that the new instance doesn't have less disks than the export
5731 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
5732 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
5733 09acf207 Guido Trotter
      if instance_disks < export_disks:
5734 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
5735 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
5736 726d7d68 Iustin Pop
                                   (instance_disks, export_disks))
5737 a8083063 Iustin Pop
5738 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
5739 09acf207 Guido Trotter
      disk_images = []
5740 09acf207 Guido Trotter
      for idx in range(export_disks):
5741 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
5742 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
5743 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
5744 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
5745 09acf207 Guido Trotter
          image = os.path.join(src_path, export_name)
5746 09acf207 Guido Trotter
          disk_images.append(image)
5747 09acf207 Guido Trotter
        else:
5748 09acf207 Guido Trotter
          disk_images.append(False)
5749 09acf207 Guido Trotter
5750 09acf207 Guido Trotter
      self.src_images = disk_images
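      # Rough example of the export config parsed above (hypothetical values):
      #   [export]
      #   version = 0
      #   [instance]
      #   disk_count = 1
      #   disk0_dump = disk0.snap
      #   nic_count = 1
      #   nic0_mac = aa:00:00:12:34:56
      # so src_images holds one absolute image path per export disk, with
      # False for any disk index whose dump option is missing.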
5751 901a65c1 Iustin Pop
5752 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
5753 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
5754 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
5755 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
5756 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
5757 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count > idx:
5758 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
5759 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
5760 bc89efc3 Guido Trotter
5761 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
5762 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
5763 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
5764 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
5765 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
5766 901a65c1 Iustin Pop
5767 901a65c1 Iustin Pop
    if self.op.ip_check:
5768 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
5769 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
5770 7b3a8fb5 Iustin Pop
                                   (self.check_ip, self.op.instance_name))
5771 901a65c1 Iustin Pop
5772 295728df Guido Trotter
    #### mac address generation
5773 295728df Guido Trotter
    # By generating the MAC address here, the allocator and the hooks both
5774 295728df Guido Trotter
    # see the real, final MAC rather than the 'auto' or 'generate' value.
5775 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
5776 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
5777 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
5778 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
5779 295728df Guido Trotter
    # creation job will fail.
5780 295728df Guido Trotter
    for nic in self.nics:
5781 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5782 295728df Guido Trotter
        nic.mac = self.cfg.GenerateMAC()
5783 295728df Guido Trotter
5784 538475ca Iustin Pop
    #### allocator run
5785 538475ca Iustin Pop
5786 538475ca Iustin Pop
    if self.op.iallocator is not None:
5787 538475ca Iustin Pop
      self._RunAllocator()
5788 0f1a06e3 Manuel Franceschini
5789 901a65c1 Iustin Pop
    #### node related checks
5790 901a65c1 Iustin Pop
5791 901a65c1 Iustin Pop
    # check primary node
5792 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
5793 7baf741d Guido Trotter
    assert self.pnode is not None, \
5794 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
5795 7527a8a4 Iustin Pop
    if pnode.offline:
5796 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
5797 7527a8a4 Iustin Pop
                                 pnode.name)
5798 733a2b6a Iustin Pop
    if pnode.drained:
5799 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
5800 733a2b6a Iustin Pop
                                 pnode.name)
5801 7527a8a4 Iustin Pop
5802 901a65c1 Iustin Pop
    self.secondaries = []
5803 901a65c1 Iustin Pop
5804 901a65c1 Iustin Pop
    # mirror node verification
5805 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
5806 7baf741d Guido Trotter
      if self.op.snode is None:
5807 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
5808 3ecf6786 Iustin Pop
                                   " a mirror node")
5809 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
5810 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
5811 3ecf6786 Iustin Pop
                                   " the primary node.")
5812 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
5813 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
5814 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
5815 a8083063 Iustin Pop
5816 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
5817 6785674e Iustin Pop
5818 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
5819 08db7c5c Iustin Pop
                                self.disks)
5820 ed1ebc60 Guido Trotter
5821 8d75db10 Iustin Pop
    # Check lv size requirements
5822 8d75db10 Iustin Pop
    if req_size is not None:
5823 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
5824 72737a7f Iustin Pop
                                         self.op.hypervisor)
5825 8d75db10 Iustin Pop
      for node in nodenames:
5826 781de953 Iustin Pop
        info = nodeinfo[node]
5827 4c4e4e1e Iustin Pop
        info.Raise("Cannot get current information from node %s" % node)
5828 070e998b Iustin Pop
        info = info.payload
5829 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
5830 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
5831 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
5832 8d75db10 Iustin Pop
                                     " node %s" % node)
5833 070e998b Iustin Pop
        if req_size > vg_free:
5834 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
5835 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
5836 070e998b Iustin Pop
                                     (node, vg_free, req_size))
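    # Worked illustration (assumed numbers): for a drbd template with two
    # 10240 MB disks, _ComputeDiskSize adds a small per-disk metadata
    # overhead, so req_size ends up slightly above 20480 MB and every node
    # checked above must report at least that much in its 'vg_free' field.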
5837 ed1ebc60 Guido Trotter
5838 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
5839 6785674e Iustin Pop
5840 a8083063 Iustin Pop
    # os verification
5841 781de953 Iustin Pop
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
5842 4c4e4e1e Iustin Pop
    result.Raise("OS '%s' not in supported os list for primary node %s" %
5843 4c4e4e1e Iustin Pop
                 (self.op.os_type, pnode.name), prereq=True)
5844 f2c05717 Guido Trotter
    if not self.op.force_variant:
5845 f2c05717 Guido Trotter
      _CheckOSVariant(result.payload, self.op.os_type)
5846 a8083063 Iustin Pop
5847 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
5848 a8083063 Iustin Pop
5849 49ce1563 Iustin Pop
    # memory check on primary node
5850 49ce1563 Iustin Pop
    if self.op.start:
5851 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
5852 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
5853 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
5854 338e51e8 Iustin Pop
                           self.op.hypervisor)
5855 49ce1563 Iustin Pop
5856 08896026 Iustin Pop
    self.dry_run_result = list(nodenames)
5857 08896026 Iustin Pop
5858 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5859 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
5860 a8083063 Iustin Pop

5861 a8083063 Iustin Pop
    """
5862 a8083063 Iustin Pop
    instance = self.op.instance_name
5863 a8083063 Iustin Pop
    pnode_name = self.pnode.name
5864 a8083063 Iustin Pop
5865 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
5866 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
5867 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
5868 2a6469d5 Alexander Schreiber
    else:
5869 2a6469d5 Alexander Schreiber
      network_port = None
5870 58acb49d Alexander Schreiber
5871 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
5872 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
5873 31a853d2 Iustin Pop
5874 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
5875 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
5876 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
5877 2c313123 Manuel Franceschini
    else:
5878 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
5879 2c313123 Manuel Franceschini
5880 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
5881 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
5882 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
5883 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
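    # Example (hypothetical paths): with a cluster file storage directory of
    # /srv/ganeti/file-storage, file_storage_dir "mydir" and instance
    # "inst1.example.com", the normalized result is
    #   /srv/ganeti/file-storage/mydir/inst1.example.com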
5884 0f1a06e3 Manuel Franceschini
5885 0f1a06e3 Manuel Franceschini
5886 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
5887 a8083063 Iustin Pop
                                  self.op.disk_template,
5888 a8083063 Iustin Pop
                                  instance, pnode_name,
5889 08db7c5c Iustin Pop
                                  self.secondaries,
5890 08db7c5c Iustin Pop
                                  self.disks,
5891 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
5892 e2a65344 Iustin Pop
                                  self.op.file_driver,
5893 e2a65344 Iustin Pop
                                  0)
5894 a8083063 Iustin Pop
5895 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
5896 a8083063 Iustin Pop
                            primary_node=pnode_name,
5897 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
5898 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
5899 4978db17 Iustin Pop
                            admin_up=False,
5900 58acb49d Alexander Schreiber
                            network_port=network_port,
5901 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
5902 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
5903 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
5904 a8083063 Iustin Pop
                            )
5905 a8083063 Iustin Pop
5906 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
5907 796cab27 Iustin Pop
    try:
5908 796cab27 Iustin Pop
      _CreateDisks(self, iobj)
5909 796cab27 Iustin Pop
    except errors.OpExecError:
5910 796cab27 Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
5911 796cab27 Iustin Pop
      try:
5912 796cab27 Iustin Pop
        _RemoveDisks(self, iobj)
5913 796cab27 Iustin Pop
      finally:
5914 796cab27 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance)
5915 796cab27 Iustin Pop
        raise
5916 a8083063 Iustin Pop
5917 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
5918 a8083063 Iustin Pop
5919 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
5920 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
5921 7baf741d Guido Trotter
    # added the instance to the config
5922 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
5923 e36e96b4 Guido Trotter
    # Unlock all the nodes
5924 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
5925 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
5926 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
5927 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
5928 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
5929 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
5930 9c8971d7 Guido Trotter
    else:
5931 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
5932 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
5933 a8083063 Iustin Pop
5934 a8083063 Iustin Pop
    if self.op.wait_for_sync:
5935 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
5936 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
5937 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
5938 a8083063 Iustin Pop
      time.sleep(15)
5939 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
5940 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
5941 a8083063 Iustin Pop
    else:
5942 a8083063 Iustin Pop
      disk_abort = False
5943 a8083063 Iustin Pop
5944 a8083063 Iustin Pop
    if disk_abort:
5945 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
5946 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
5947 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
5948 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
5949 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
5950 3ecf6786 Iustin Pop
                               " this instance")
5951 a8083063 Iustin Pop
5952 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
5953 a8083063 Iustin Pop
                (instance, pnode_name))
5954 a8083063 Iustin Pop
5955 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
5956 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
5957 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
5958 e557bae9 Guido Trotter
        result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
5959 4c4e4e1e Iustin Pop
        result.Raise("Could not add os for instance %s"
5960 4c4e4e1e Iustin Pop
                     " on node %s" % (instance, pnode_name))
5961 a8083063 Iustin Pop
5962 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
5963 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
5964 a8083063 Iustin Pop
        src_node = self.op.src_node
5965 09acf207 Guido Trotter
        src_images = self.src_images
5966 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
5967 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
5968 09acf207 Guido Trotter
                                                         src_node, src_images,
5969 6c0af70e Guido Trotter
                                                         cluster_name)
5970 4c4e4e1e Iustin Pop
        msg = import_result.fail_msg
5971 944bf548 Iustin Pop
        if msg:
5972 944bf548 Iustin Pop
          self.LogWarning("Error while importing the disk images for instance"
5973 944bf548 Iustin Pop
                          " %s on node %s: %s" % (instance, pnode_name, msg))
5974 a8083063 Iustin Pop
      else:
5975 a8083063 Iustin Pop
        # also checked in the prereq part
5976 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
5977 3ecf6786 Iustin Pop
                                     % self.op.mode)
5978 a8083063 Iustin Pop
5979 a8083063 Iustin Pop
    if self.op.start:
5980 4978db17 Iustin Pop
      iobj.admin_up = True
5981 4978db17 Iustin Pop
      self.cfg.Update(iobj)
5982 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
5983 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
5984 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
5985 4c4e4e1e Iustin Pop
      result.Raise("Could not start instance")
5986 a8083063 Iustin Pop
5987 08896026 Iustin Pop
    return list(iobj.all_nodes)
5988 08896026 Iustin Pop
5989 a8083063 Iustin Pop
5990 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
5991 a8083063 Iustin Pop
  """Connect to an instance's console.
5992 a8083063 Iustin Pop

5993 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
5994 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
5995 a8083063 Iustin Pop
  console.
5996 a8083063 Iustin Pop

5997 a8083063 Iustin Pop
  """
5998 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
5999 8659b73e Guido Trotter
  REQ_BGL = False
6000 8659b73e Guido Trotter
6001 8659b73e Guido Trotter
  def ExpandNames(self):
6002 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
6003 a8083063 Iustin Pop
6004 a8083063 Iustin Pop
  def CheckPrereq(self):
6005 a8083063 Iustin Pop
    """Check prerequisites.
6006 a8083063 Iustin Pop

6007 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
6008 a8083063 Iustin Pop

6009 a8083063 Iustin Pop
    """
6010 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6011 8659b73e Guido Trotter
    assert self.instance is not None, \
6012 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6013 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
6014 a8083063 Iustin Pop
6015 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6016 a8083063 Iustin Pop
    """Connect to the console of an instance
6017 a8083063 Iustin Pop

6018 a8083063 Iustin Pop
    """
6019 a8083063 Iustin Pop
    instance = self.instance
6020 a8083063 Iustin Pop
    node = instance.primary_node
6021 a8083063 Iustin Pop
6022 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
6023 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
6024 4c4e4e1e Iustin Pop
    node_insts.Raise("Can't get node information from %s" % node)
6025 a8083063 Iustin Pop
6026 aca13712 Iustin Pop
    if instance.name not in node_insts.payload:
6027 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
6028 a8083063 Iustin Pop
6029 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
6030 a8083063 Iustin Pop
6031 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
6032 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
6033 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
6034 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
6035 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
6036 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
6037 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
6038 b047857b Michael Hanselmann
6039 82122173 Iustin Pop
    # build ssh cmdline
6040 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
6041 a8083063 Iustin Pop
6042 a8083063 Iustin Pop
6043 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
6044 a8083063 Iustin Pop
  """Replace the disks of an instance.
6045 a8083063 Iustin Pop

6046 a8083063 Iustin Pop
  """
6047 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
6048 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6049 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
6050 efd990e4 Guido Trotter
  REQ_BGL = False
6051 efd990e4 Guido Trotter
6052 7e9366f7 Iustin Pop
  def CheckArguments(self):
6053 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
6054 efd990e4 Guido Trotter
      self.op.remote_node = None
6055 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
6056 7e9366f7 Iustin Pop
      self.op.iallocator = None
6057 7e9366f7 Iustin Pop
6058 c68174b6 Michael Hanselmann
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
6059 c68174b6 Michael Hanselmann
                                  self.op.iallocator)
6060 7e9366f7 Iustin Pop
6061 7e9366f7 Iustin Pop
  def ExpandNames(self):
6062 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
6063 7e9366f7 Iustin Pop
6064 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
6065 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6066 2bb5c911 Michael Hanselmann
6067 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
6068 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
6069 efd990e4 Guido Trotter
      if remote_node is None:
6070 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
6071 efd990e4 Guido Trotter
                                   self.op.remote_node)
6072 2bb5c911 Michael Hanselmann
6073 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
6074 2bb5c911 Michael Hanselmann
6075 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
6076 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
6077 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
6078 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
6079 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
6080 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6081 2bb5c911 Michael Hanselmann
6082 efd990e4 Guido Trotter
    else:
6083 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
6084 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6085 efd990e4 Guido Trotter
6086 c68174b6 Michael Hanselmann
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
6087 c68174b6 Michael Hanselmann
                                   self.op.iallocator, self.op.remote_node,
6088 c68174b6 Michael Hanselmann
                                   self.op.disks)
6089 c68174b6 Michael Hanselmann
6090 3a012b41 Michael Hanselmann
    self.tasklets = [self.replacer]
6091 2bb5c911 Michael Hanselmann
6092 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
6093 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
6094 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
6095 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
6096 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6097 efd990e4 Guido Trotter
      self._LockInstancesNodes()
6098 a8083063 Iustin Pop
6099 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6100 a8083063 Iustin Pop
    """Build hooks env.
6101 a8083063 Iustin Pop

6102 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
6103 a8083063 Iustin Pop

6104 a8083063 Iustin Pop
    """
6105 2bb5c911 Michael Hanselmann
    instance = self.replacer.instance
6106 a8083063 Iustin Pop
    env = {
6107 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
6108 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
6109 2bb5c911 Michael Hanselmann
      "OLD_SECONDARY": instance.secondary_nodes[0],
6110 a8083063 Iustin Pop
      }
6111 2bb5c911 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self, instance))
6112 0834c866 Iustin Pop
    nl = [
6113 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
6114 2bb5c911 Michael Hanselmann
      instance.primary_node,
6115 0834c866 Iustin Pop
      ]
6116 0834c866 Iustin Pop
    if self.op.remote_node is not None:
6117 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
6118 a8083063 Iustin Pop
    return env, nl, nl
6119 a8083063 Iustin Pop
6120 2bb5c911 Michael Hanselmann
6121 7ffc5a86 Michael Hanselmann
class LUEvacuateNode(LogicalUnit):
6122 7ffc5a86 Michael Hanselmann
  """Relocate the secondary instances from a node.
6123 7ffc5a86 Michael Hanselmann

6124 7ffc5a86 Michael Hanselmann
  """
6125 7ffc5a86 Michael Hanselmann
  HPATH = "node-evacuate"
6126 7ffc5a86 Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
6127 7ffc5a86 Michael Hanselmann
  _OP_REQP = ["node_name"]
6128 7ffc5a86 Michael Hanselmann
  REQ_BGL = False
6129 7ffc5a86 Michael Hanselmann
6130 7ffc5a86 Michael Hanselmann
  def CheckArguments(self):
6131 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "remote_node"):
6132 7ffc5a86 Michael Hanselmann
      self.op.remote_node = None
6133 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "iallocator"):
6134 7ffc5a86 Michael Hanselmann
      self.op.iallocator = None
6135 7ffc5a86 Michael Hanselmann
6136 7ffc5a86 Michael Hanselmann
    TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
6137 7ffc5a86 Michael Hanselmann
                                  self.op.remote_node,
6138 7ffc5a86 Michael Hanselmann
                                  self.op.iallocator)
6139 7ffc5a86 Michael Hanselmann
6140 7ffc5a86 Michael Hanselmann
  def ExpandNames(self):
6141 7ffc5a86 Michael Hanselmann
    self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name)
6142 7ffc5a86 Michael Hanselmann
    if self.op.node_name is None:
6143 7ffc5a86 Michael Hanselmann
      raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name)
6144 7ffc5a86 Michael Hanselmann
6145 7ffc5a86 Michael Hanselmann
    self.needed_locks = {}
6146 7ffc5a86 Michael Hanselmann
6147 7ffc5a86 Michael Hanselmann
    # Declare node locks
6148 7ffc5a86 Michael Hanselmann
    if self.op.iallocator is not None:
6149 7ffc5a86 Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6150 7ffc5a86 Michael Hanselmann
6151 7ffc5a86 Michael Hanselmann
    elif self.op.remote_node is not None:
6152 7ffc5a86 Michael Hanselmann
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
6153 7ffc5a86 Michael Hanselmann
      if remote_node is None:
6154 7ffc5a86 Michael Hanselmann
        raise errors.OpPrereqError("Node '%s' not known" %
6155 7ffc5a86 Michael Hanselmann
                                   self.op.remote_node)
6156 7ffc5a86 Michael Hanselmann
6157 7ffc5a86 Michael Hanselmann
      self.op.remote_node = remote_node
6158 7ffc5a86 Michael Hanselmann
6159 7ffc5a86 Michael Hanselmann
      # Warning: do not remove the locking of the new secondary here
6160 7ffc5a86 Michael Hanselmann
      # unless DRBD8.AddChildren is changed to work in parallel;
6161 7ffc5a86 Michael Hanselmann
      # currently it doesn't since parallel invocations of
6162 7ffc5a86 Michael Hanselmann
      # FindUnusedMinor will conflict
6163 7ffc5a86 Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
6164 7ffc5a86 Michael Hanselmann
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6165 7ffc5a86 Michael Hanselmann
6166 7ffc5a86 Michael Hanselmann
    else:
6167 7ffc5a86 Michael Hanselmann
      raise errors.OpPrereqError("Invalid parameters")
6168 7ffc5a86 Michael Hanselmann
6169 7ffc5a86 Michael Hanselmann
    # Create tasklets for replacing disks for all secondary instances on this
6170 7ffc5a86 Michael Hanselmann
    # node
6171 7ffc5a86 Michael Hanselmann
    names = []
6172 3a012b41 Michael Hanselmann
    tasklets = []
6173 7ffc5a86 Michael Hanselmann
6174 7ffc5a86 Michael Hanselmann
    for inst in _GetNodeSecondaryInstances(self.cfg, self.op.node_name):
6175 7ffc5a86 Michael Hanselmann
      logging.debug("Replacing disks for instance %s", inst.name)
6176 7ffc5a86 Michael Hanselmann
      names.append(inst.name)
6177 7ffc5a86 Michael Hanselmann
6178 7ffc5a86 Michael Hanselmann
      replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
6179 7ffc5a86 Michael Hanselmann
                                self.op.iallocator, self.op.remote_node, [])
6180 3a012b41 Michael Hanselmann
      tasklets.append(replacer)
6181 7ffc5a86 Michael Hanselmann
6182 3a012b41 Michael Hanselmann
    self.tasklets = tasklets
6183 7ffc5a86 Michael Hanselmann
    self.instance_names = names
6184 7ffc5a86 Michael Hanselmann
6185 7ffc5a86 Michael Hanselmann
    # Declare instance locks
6186 7ffc5a86 Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names
6187 7ffc5a86 Michael Hanselmann
6188 7ffc5a86 Michael Hanselmann
  def DeclareLocks(self, level):
6189 7ffc5a86 Michael Hanselmann
    # If we're not already locking all nodes in the set we have to declare the
6190 7ffc5a86 Michael Hanselmann
    # instance's primary/secondary nodes.
6191 7ffc5a86 Michael Hanselmann
    if (level == locking.LEVEL_NODE and
6192 7ffc5a86 Michael Hanselmann
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6193 7ffc5a86 Michael Hanselmann
      self._LockInstancesNodes()
6194 7ffc5a86 Michael Hanselmann
6195 7ffc5a86 Michael Hanselmann
  def BuildHooksEnv(self):
6196 7ffc5a86 Michael Hanselmann
    """Build hooks env.
6197 7ffc5a86 Michael Hanselmann

6198 7ffc5a86 Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
6199 7ffc5a86 Michael Hanselmann

6200 7ffc5a86 Michael Hanselmann
    """
6201 7ffc5a86 Michael Hanselmann
    env = {
6202 7ffc5a86 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
6203 7ffc5a86 Michael Hanselmann
      }
6204 7ffc5a86 Michael Hanselmann
6205 7ffc5a86 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
6206 7ffc5a86 Michael Hanselmann
6207 7ffc5a86 Michael Hanselmann
    if self.op.remote_node is not None:
6208 7ffc5a86 Michael Hanselmann
      env["NEW_SECONDARY"] = self.op.remote_node
6209 7ffc5a86 Michael Hanselmann
      nl.append(self.op.remote_node)
6210 7ffc5a86 Michael Hanselmann
6211 7ffc5a86 Michael Hanselmann
    return (env, nl, nl)
6212 7ffc5a86 Michael Hanselmann
6213 7ffc5a86 Michael Hanselmann
6214 c68174b6 Michael Hanselmann
class TLReplaceDisks(Tasklet):
6215 2bb5c911 Michael Hanselmann
  """Replaces disks for an instance.
6216 2bb5c911 Michael Hanselmann

6217 2bb5c911 Michael Hanselmann
  Note: Locking is not within the scope of this class.
6218 2bb5c911 Michael Hanselmann

6219 2bb5c911 Michael Hanselmann
  """
6220 2bb5c911 Michael Hanselmann
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
6221 2bb5c911 Michael Hanselmann
               disks):
6222 2bb5c911 Michael Hanselmann
    """Initializes this class.
6223 2bb5c911 Michael Hanselmann

6224 2bb5c911 Michael Hanselmann
    """
6225 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
6226 464243a7 Michael Hanselmann
6227 2bb5c911 Michael Hanselmann
    # Parameters
6228 2bb5c911 Michael Hanselmann
    self.instance_name = instance_name
6229 2bb5c911 Michael Hanselmann
    self.mode = mode
6230 2bb5c911 Michael Hanselmann
    self.iallocator_name = iallocator_name
6231 2bb5c911 Michael Hanselmann
    self.remote_node = remote_node
6232 2bb5c911 Michael Hanselmann
    self.disks = disks
6233 2bb5c911 Michael Hanselmann
6234 2bb5c911 Michael Hanselmann
    # Runtime data
6235 2bb5c911 Michael Hanselmann
    self.instance = None
6236 2bb5c911 Michael Hanselmann
    self.new_node = None
6237 2bb5c911 Michael Hanselmann
    self.target_node = None
6238 2bb5c911 Michael Hanselmann
    self.other_node = None
6239 2bb5c911 Michael Hanselmann
    self.remote_node_info = None
6240 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = None
6241 2bb5c911 Michael Hanselmann
6242 2bb5c911 Michael Hanselmann
  @staticmethod
6243 2bb5c911 Michael Hanselmann
  def CheckArguments(mode, remote_node, iallocator):
6244 c68174b6 Michael Hanselmann
    """Helper function for users of this class.
6245 c68174b6 Michael Hanselmann

6246 c68174b6 Michael Hanselmann
    """
6247 2bb5c911 Michael Hanselmann
    # check for valid parameter combination
6248 2bb5c911 Michael Hanselmann
    if mode == constants.REPLACE_DISK_CHG:
6249 02a00186 Michael Hanselmann
      if remote_node is None and iallocator is None:
6250 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("When changing the secondary either an"
6251 2bb5c911 Michael Hanselmann
                                   " iallocator script must be used or the"
6252 2bb5c911 Michael Hanselmann
                                   " new node given")
6253 02a00186 Michael Hanselmann
6254 02a00186 Michael Hanselmann
      if remote_node is not None and iallocator is not None:
6255 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("Give either the iallocator or the new"
6256 2bb5c911 Michael Hanselmann
                                   " secondary, not both")
6257 02a00186 Michael Hanselmann
6258 02a00186 Michael Hanselmann
    elif remote_node is not None or iallocator is not None:
6259 02a00186 Michael Hanselmann
      # Not replacing the secondary
6260 02a00186 Michael Hanselmann
      raise errors.OpPrereqError("The iallocator and new node options can"
6261 02a00186 Michael Hanselmann
                                 " only be used when changing the"
6262 02a00186 Michael Hanselmann
                                 " secondary node")
6263 2bb5c911 Michael Hanselmann
6264 2bb5c911 Michael Hanselmann
  @staticmethod
6265 2bb5c911 Michael Hanselmann
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
6266 2bb5c911 Michael Hanselmann
    """Compute a new secondary node using an IAllocator.
6267 2bb5c911 Michael Hanselmann

6268 2bb5c911 Michael Hanselmann
    """
6269 2bb5c911 Michael Hanselmann
    ial = IAllocator(lu.cfg, lu.rpc,
6270 2bb5c911 Michael Hanselmann
                     mode=constants.IALLOCATOR_MODE_RELOC,
6271 2bb5c911 Michael Hanselmann
                     name=instance_name,
6272 2bb5c911 Michael Hanselmann
                     relocate_from=relocate_from)
6273 2bb5c911 Michael Hanselmann
6274 2bb5c911 Michael Hanselmann
    ial.Run(iallocator_name)
6275 2bb5c911 Michael Hanselmann
6276 2bb5c911 Michael Hanselmann
    if not ial.success:
6277 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
6278 2bb5c911 Michael Hanselmann
                                 " %s" % (iallocator_name, ial.info))
6279 2bb5c911 Michael Hanselmann
6280 2bb5c911 Michael Hanselmann
    if len(ial.nodes) != ial.required_nodes:
6281 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6282 2bb5c911 Michael Hanselmann
                                 " of nodes (%s), required %s" %
6283 2bb5c911 Michael Hanselmann
                                 (iallocator_name, len(ial.nodes),
                                  ial.required_nodes))
6284 2bb5c911 Michael Hanselmann
6285 2bb5c911 Michael Hanselmann
    remote_node_name = ial.nodes[0]
6286 2bb5c911 Michael Hanselmann
6287 2bb5c911 Michael Hanselmann
    lu.LogInfo("Selected new secondary for instance '%s': %s",
6288 2bb5c911 Michael Hanselmann
               instance_name, remote_node_name)
6289 2bb5c911 Michael Hanselmann
6290 2bb5c911 Michael Hanselmann
    return remote_node_name
6291 2bb5c911 Michael Hanselmann
6292 942be002 Michael Hanselmann
  def _FindFaultyDisks(self, node_name):
6293 2d9005d8 Michael Hanselmann
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
6294 2d9005d8 Michael Hanselmann
                                    node_name, True)
6295 942be002 Michael Hanselmann
6296 2bb5c911 Michael Hanselmann
  def CheckPrereq(self):
6297 2bb5c911 Michael Hanselmann
    """Check prerequisites.
6298 2bb5c911 Michael Hanselmann

6299 2bb5c911 Michael Hanselmann
    This checks that the instance is in the cluster.
6300 2bb5c911 Michael Hanselmann

6301 2bb5c911 Michael Hanselmann
    """
6302 2bb5c911 Michael Hanselmann
    self.instance = self.cfg.GetInstanceInfo(self.instance_name)
6303 2bb5c911 Michael Hanselmann
    assert self.instance is not None, \
6304 2bb5c911 Michael Hanselmann
      "Cannot retrieve locked instance %s" % self.instance_name
6305 2bb5c911 Michael Hanselmann
6306 2bb5c911 Michael Hanselmann
    if self.instance.disk_template != constants.DT_DRBD8:
6307 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
6308 7e9366f7 Iustin Pop
                                 " instances")
6309 a8083063 Iustin Pop
6310 2bb5c911 Michael Hanselmann
    if len(self.instance.secondary_nodes) != 1:
6311 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
6312 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
6313 2bb5c911 Michael Hanselmann
                                 len(self.instance.secondary_nodes))
6314 a8083063 Iustin Pop
6315 2bb5c911 Michael Hanselmann
    secondary_node = self.instance.secondary_nodes[0]
6316 a9e0c397 Iustin Pop
6317 2bb5c911 Michael Hanselmann
    if self.iallocator_name is None:
6318 2bb5c911 Michael Hanselmann
      remote_node = self.remote_node
6319 2bb5c911 Michael Hanselmann
    else:
6320 2bb5c911 Michael Hanselmann
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
6321 2bb5c911 Michael Hanselmann
                                       self.instance.name, secondary_node)
6322 b6e82a65 Iustin Pop
6323 a9e0c397 Iustin Pop
    if remote_node is not None:
6324 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
6325 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
6326 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
6327 a9e0c397 Iustin Pop
    else:
6328 a9e0c397 Iustin Pop
      self.remote_node_info = None
6329 2bb5c911 Michael Hanselmann
6330 2bb5c911 Michael Hanselmann
    if remote_node == self.instance.primary_node:
6331 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
6332 3ecf6786 Iustin Pop
                                 " the instance.")
6333 2bb5c911 Michael Hanselmann
6334 2bb5c911 Michael Hanselmann
    if remote_node == secondary_node:
6335 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
6336 7e9366f7 Iustin Pop
                                 " secondary node of the instance.")
6337 7e9366f7 Iustin Pop
6338 2945fd2d Michael Hanselmann
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
6339 2945fd2d Michael Hanselmann
                                    constants.REPLACE_DISK_CHG):
6340 2945fd2d Michael Hanselmann
      raise errors.OpPrereqError("Cannot specify disks to be replaced")
6341 942be002 Michael Hanselmann
6342 2945fd2d Michael Hanselmann
    if self.mode == constants.REPLACE_DISK_AUTO:
6343 942be002 Michael Hanselmann
      faulty_primary = self._FindFaultyDisks(self.instance.primary_node)
6344 942be002 Michael Hanselmann
      faulty_secondary = self._FindFaultyDisks(secondary_node)
6345 942be002 Michael Hanselmann
6346 942be002 Michael Hanselmann
      if faulty_primary and faulty_secondary:
6347 942be002 Michael Hanselmann
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
6348 942be002 Michael Hanselmann
                                   " one node and can not be repaired"
6349 942be002 Michael Hanselmann
                                   " automatically" % self.instance_name)
6350 942be002 Michael Hanselmann
6351 942be002 Michael Hanselmann
      if faulty_primary:
6352 942be002 Michael Hanselmann
        self.disks = faulty_primary
6353 942be002 Michael Hanselmann
        self.target_node = self.instance.primary_node
6354 942be002 Michael Hanselmann
        self.other_node = secondary_node
6355 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6356 942be002 Michael Hanselmann
      elif faulty_secondary:
6357 942be002 Michael Hanselmann
        self.disks = faulty_secondary
6358 942be002 Michael Hanselmann
        self.target_node = secondary_node
6359 942be002 Michael Hanselmann
        self.other_node = self.instance.primary_node
6360 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6361 942be002 Michael Hanselmann
      else:
6362 942be002 Michael Hanselmann
        self.disks = []
6363 942be002 Michael Hanselmann
        check_nodes = []
6364 942be002 Michael Hanselmann
6365 942be002 Michael Hanselmann
    else:
6366 942be002 Michael Hanselmann
      # Non-automatic modes
6367 942be002 Michael Hanselmann
      if self.mode == constants.REPLACE_DISK_PRI:
6368 942be002 Michael Hanselmann
        self.target_node = self.instance.primary_node
6369 942be002 Michael Hanselmann
        self.other_node = secondary_node
6370 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6371 7e9366f7 Iustin Pop
6372 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_SEC:
6373 942be002 Michael Hanselmann
        self.target_node = secondary_node
6374 942be002 Michael Hanselmann
        self.other_node = self.instance.primary_node
6375 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6376 a9e0c397 Iustin Pop
6377 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_CHG:
6378 942be002 Michael Hanselmann
        self.new_node = remote_node
6379 942be002 Michael Hanselmann
        self.other_node = self.instance.primary_node
6380 942be002 Michael Hanselmann
        self.target_node = secondary_node
6381 942be002 Michael Hanselmann
        check_nodes = [self.new_node, self.other_node]
6382 54155f52 Iustin Pop
6383 942be002 Michael Hanselmann
        _CheckNodeNotDrained(self.lu, remote_node)
6384 a8083063 Iustin Pop
6385 942be002 Michael Hanselmann
      else:
6386 942be002 Michael Hanselmann
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
6387 942be002 Michael Hanselmann
                                     self.mode)
6388 942be002 Michael Hanselmann
6389 942be002 Michael Hanselmann
      # If not specified all disks should be replaced
6390 942be002 Michael Hanselmann
      if not self.disks:
6391 942be002 Michael Hanselmann
        self.disks = range(len(self.instance.disks))
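    # Informal recap of the dispatch above:
    #   REPLACE_DISK_PRI  -> target = primary,   other = secondary
    #   REPLACE_DISK_SEC  -> target = secondary, other = primary
    #   REPLACE_DISK_CHG  -> target = secondary, other = primary,
    #                        new_node = remote_node
    #   REPLACE_DISK_AUTO -> target/other follow whichever side reported
    #                        faulty disks (possibly neither)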
6392 a9e0c397 Iustin Pop
6393 2bb5c911 Michael Hanselmann
    for node in check_nodes:
6394 2bb5c911 Michael Hanselmann
      _CheckNodeOnline(self.lu, node)
6395 e4376078 Iustin Pop
6396 2bb5c911 Michael Hanselmann
    # Check whether disks are valid
6397 2bb5c911 Michael Hanselmann
    for disk_idx in self.disks:
6398 2bb5c911 Michael Hanselmann
      self.instance.FindDisk(disk_idx)
6399 e4376078 Iustin Pop
6400 2bb5c911 Michael Hanselmann
    # Get secondary node IP addresses
6401 2bb5c911 Michael Hanselmann
    node_2nd_ip = {}
6402 e4376078 Iustin Pop
6403 2bb5c911 Michael Hanselmann
    for node_name in [self.target_node, self.other_node, self.new_node]:
6404 2bb5c911 Michael Hanselmann
      if node_name is not None:
6405 2bb5c911 Michael Hanselmann
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
6406 e4376078 Iustin Pop
6407 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = node_2nd_ip
6408 a9e0c397 Iustin Pop
6409 c68174b6 Michael Hanselmann
  def Exec(self, feedback_fn):
6410 2bb5c911 Michael Hanselmann
    """Execute disk replacement.
6411 2bb5c911 Michael Hanselmann

6412 2bb5c911 Michael Hanselmann
    This dispatches the disk replacement to the appropriate handler.
6413 cff90b79 Iustin Pop

6414 a9e0c397 Iustin Pop
    """
6415 942be002 Michael Hanselmann
    if not self.disks:
6416 942be002 Michael Hanselmann
      feedback_fn("No disks need replacement")
6417 942be002 Michael Hanselmann
      return
6418 942be002 Michael Hanselmann
6419 942be002 Michael Hanselmann
    feedback_fn("Replacing disk(s) %s for %s" %
6420 942be002 Michael Hanselmann
                (", ".join([str(i) for i in self.disks]), self.instance.name))
6421 7ffc5a86 Michael Hanselmann
6422 2bb5c911 Michael Hanselmann
    activate_disks = (not self.instance.admin_up)
6423 2bb5c911 Michael Hanselmann
6424 2bb5c911 Michael Hanselmann
    # Activate the instance disks if we're replacing them on a down instance
6425 2bb5c911 Michael Hanselmann
    if activate_disks:
6426 2bb5c911 Michael Hanselmann
      _StartInstanceDisks(self.lu, self.instance, True)
6427 2bb5c911 Michael Hanselmann
6428 2bb5c911 Michael Hanselmann
    try:
6429 942be002 Michael Hanselmann
      # Should we replace the secondary node?
6430 942be002 Michael Hanselmann
      if self.new_node is not None:
6431 2bb5c911 Michael Hanselmann
        return self._ExecDrbd8Secondary()
6432 2bb5c911 Michael Hanselmann
      else:
6433 2bb5c911 Michael Hanselmann
        return self._ExecDrbd8DiskOnly()
6434 2bb5c911 Michael Hanselmann
6435 2bb5c911 Michael Hanselmann
    finally:
6436 2bb5c911 Michael Hanselmann
      # Deactivate the instance disks if we're replacing them on a down instance
6437 2bb5c911 Michael Hanselmann
      if activate_disks:
6438 2bb5c911 Michael Hanselmann
        _SafeShutdownInstanceDisks(self.lu, self.instance)
6439 2bb5c911 Michael Hanselmann
6440 2bb5c911 Michael Hanselmann
  def _CheckVolumeGroup(self, nodes):
6441 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Checking volume groups")
6442 2bb5c911 Michael Hanselmann
6443 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
6444 cff90b79 Iustin Pop
6445 2bb5c911 Michael Hanselmann
    # Make sure volume group exists on all involved nodes
6446 2bb5c911 Michael Hanselmann
    results = self.rpc.call_vg_list(nodes)
6447 cff90b79 Iustin Pop
    if not results:
6448 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
6449 2bb5c911 Michael Hanselmann
6450 2bb5c911 Michael Hanselmann
    for node in nodes:
6451 781de953 Iustin Pop
      res = results[node]
6452 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
6453 2bb5c911 Michael Hanselmann
      if vgname not in res.payload:
6454 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
6455 2bb5c911 Michael Hanselmann
                                 (vgname, node))
6456 2bb5c911 Michael Hanselmann
6457 2bb5c911 Michael Hanselmann
  def _CheckDisksExistence(self, nodes):
6458 2bb5c911 Michael Hanselmann
    # Check disk existence
6459 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
6460 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
6461 cff90b79 Iustin Pop
        continue
6462 2bb5c911 Michael Hanselmann
6463 2bb5c911 Michael Hanselmann
      for node in nodes:
6464 2bb5c911 Michael Hanselmann
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
6465 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(dev, node)
6466 2bb5c911 Michael Hanselmann
6467 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
6468 2bb5c911 Michael Hanselmann
6469 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6470 2bb5c911 Michael Hanselmann
        if msg or not result.payload:
6471 2bb5c911 Michael Hanselmann
          if not msg:
6472 2bb5c911 Michael Hanselmann
            msg = "disk not found"
6473 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
6474 23829f6f Iustin Pop
                                   (idx, node, msg))
6475 cff90b79 Iustin Pop
6476 2bb5c911 Michael Hanselmann
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
6477 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
6478 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
6479 cff90b79 Iustin Pop
        continue
6480 cff90b79 Iustin Pop
6481 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
6482 2bb5c911 Michael Hanselmann
                      (idx, node_name))
6483 2bb5c911 Michael Hanselmann
6484 2bb5c911 Michael Hanselmann
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
6485 2bb5c911 Michael Hanselmann
                                   ldisk=ldisk):
6486 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
6487 2bb5c911 Michael Hanselmann
                                 " replace disks for instance %s" %
6488 2bb5c911 Michael Hanselmann
                                 (node_name, self.instance.name))
6489 2bb5c911 Michael Hanselmann
6490 2bb5c911 Michael Hanselmann
  def _CreateNewStorage(self, node_name):
6491 2bb5c911 Michael Hanselmann
    vgname = self.cfg.GetVGName()
6492 2bb5c911 Michael Hanselmann
    iv_names = {}
6493 2bb5c911 Michael Hanselmann
6494 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
6495 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
6496 a9e0c397 Iustin Pop
        continue
6497 2bb5c911 Michael Hanselmann
6498 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
6499 2bb5c911 Michael Hanselmann
6500 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
6501 2bb5c911 Michael Hanselmann
6502 2bb5c911 Michael Hanselmann
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
6503 2bb5c911 Michael Hanselmann
      names = _GenerateUniqueNames(self.lu, lv_names)
6504 2bb5c911 Michael Hanselmann
6505 2bb5c911 Michael Hanselmann
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
6506 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
6507 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
6508 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
6509 2bb5c911 Michael Hanselmann
6510 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
6511 a9e0c397 Iustin Pop
      old_lvs = dev.children
6512 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
6513 2bb5c911 Michael Hanselmann
6514 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
6515 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
6516 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
6517 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
6518 2bb5c911 Michael Hanselmann
6519 2bb5c911 Michael Hanselmann
    return iv_names
6520 2bb5c911 Michael Hanselmann
6521 2bb5c911 Michael Hanselmann
  def _CheckDevices(self, node_name, iv_names):
6522 2bb5c911 Michael Hanselmann
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
6523 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
6524 2bb5c911 Michael Hanselmann
6525 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_find(node_name, dev)
6526 2bb5c911 Michael Hanselmann
6527 2bb5c911 Michael Hanselmann
      msg = result.fail_msg
6528 2bb5c911 Michael Hanselmann
      if msg or not result.payload:
6529 2bb5c911 Michael Hanselmann
        if not msg:
6530 2bb5c911 Michael Hanselmann
          msg = "disk not found"
6531 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
6532 2bb5c911 Michael Hanselmann
                                 (name, msg))
6533 2bb5c911 Michael Hanselmann
6534 96acbc09 Michael Hanselmann
      if result.payload.is_degraded:
6535 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
6536 2bb5c911 Michael Hanselmann
6537 2bb5c911 Michael Hanselmann
  def _RemoveOldStorage(self, node_name, iv_names):
6538 2bb5c911 Michael Hanselmann
    for name, (dev, old_lvs, _) in iv_names.iteritems():
6539 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Remove logical volumes for %s" % name)
6540 2bb5c911 Michael Hanselmann
6541 2bb5c911 Michael Hanselmann
      for lv in old_lvs:
6542 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(lv, node_name)
6543 2bb5c911 Michael Hanselmann
6544 2bb5c911 Michael Hanselmann
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
6545 2bb5c911 Michael Hanselmann
        if msg:
6546 2bb5c911 Michael Hanselmann
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
6547 2bb5c911 Michael Hanselmann
                             hint="remove unused LVs manually")
6548 2bb5c911 Michael Hanselmann
6549 2bb5c911 Michael Hanselmann
  def _ExecDrbd8DiskOnly(self):
6550 2bb5c911 Michael Hanselmann
    """Replace a disk on the primary or secondary for DRBD 8.
6551 2bb5c911 Michael Hanselmann

6552 2bb5c911 Michael Hanselmann
    The algorithm for replace is quite complicated:
6553 2bb5c911 Michael Hanselmann

6554 2bb5c911 Michael Hanselmann
      1. for each disk to be replaced:
6555 2bb5c911 Michael Hanselmann

6556 2bb5c911 Michael Hanselmann
        1. create new LVs on the target node with unique names
6557 2bb5c911 Michael Hanselmann
        1. detach old LVs from the drbd device
6558 2bb5c911 Michael Hanselmann
        1. rename old LVs to name_replaced.<time_t>
6559 2bb5c911 Michael Hanselmann
        1. rename new LVs to old LVs
6560 2bb5c911 Michael Hanselmann
        1. attach the new LVs (with the old names now) to the drbd device
6561 2bb5c911 Michael Hanselmann

6562 2bb5c911 Michael Hanselmann
      1. wait for sync across all devices
6563 2bb5c911 Michael Hanselmann

6564 2bb5c911 Michael Hanselmann
      1. for each modified disk:
6565 2bb5c911 Michael Hanselmann

6566 2bb5c911 Michael Hanselmann
        1. remove old LVs (which have the name name_replaced.<time_t>)
6567 2bb5c911 Michael Hanselmann

6568 2bb5c911 Michael Hanselmann
    Failures are not very well handled.
6569 2bb5c911 Michael Hanselmann

6570 2bb5c911 Michael Hanselmann
    """
6571 2bb5c911 Michael Hanselmann
    steps_total = 6
6572 2bb5c911 Michael Hanselmann
6573 2bb5c911 Michael Hanselmann
    # Step: check device activation
6574 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
6575 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.other_node, self.target_node])
6576 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.target_node, self.other_node])
6577 2bb5c911 Michael Hanselmann
6578 2bb5c911 Michael Hanselmann
    # Step: check other node consistency
6579 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
6580 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.other_node,
6581 2bb5c911 Michael Hanselmann
                                self.other_node == self.instance.primary_node,
6582 2bb5c911 Michael Hanselmann
                                False)
6583 2bb5c911 Michael Hanselmann
6584 2bb5c911 Michael Hanselmann
    # Step: create new storage
6585 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
6586 2bb5c911 Michael Hanselmann
    iv_names = self._CreateNewStorage(self.target_node)
6587 a9e0c397 Iustin Pop
6588 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
6589 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
6590 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
6591 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
6592 2bb5c911 Michael Hanselmann
6593 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
6594 4d4a651d Michael Hanselmann
                                                     old_lvs)
6595 4c4e4e1e Iustin Pop
      result.Raise("Can't detach drbd from local storage on node"
6596 2bb5c911 Michael Hanselmann
                   " %s for device %s" % (self.target_node, dev.iv_name))
6597 cff90b79 Iustin Pop
      #dev.children = []
6598 cff90b79 Iustin Pop
      #cfg.Update(instance)
6599 a9e0c397 Iustin Pop
6600 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
6601 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
6602 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
6603 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
6604 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
6605 cff90b79 Iustin Pop
6606 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
6607 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
6608 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
6609 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
6610 2bb5c911 Michael Hanselmann
6611 2bb5c911 Michael Hanselmann
      # Build the rename list based on what LVs exist on the node
6612 2bb5c911 Michael Hanselmann
      rename_old_to_new = []
6613 cff90b79 Iustin Pop
      for to_ren in old_lvs:
6614 2bb5c911 Michael Hanselmann
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
6615 4c4e4e1e Iustin Pop
        if not result.fail_msg and result.payload:
6616 23829f6f Iustin Pop
          # device exists
6617 2bb5c911 Michael Hanselmann
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
6618 cff90b79 Iustin Pop
6619 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the old LVs on the target node")
6620 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
6621 4d4a651d Michael Hanselmann
                                             rename_old_to_new)
6622 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
6623 2bb5c911 Michael Hanselmann
6624 2bb5c911 Michael Hanselmann
      # Now we rename the new LVs to the old LVs
6625 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the new LVs on the target node")
6626 2bb5c911 Michael Hanselmann
      rename_new_to_old = [(new, old.physical_id)
6627 2bb5c911 Michael Hanselmann
                           for old, new in zip(old_lvs, new_lvs)]
6628 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
6629 4d4a651d Michael Hanselmann
                                             rename_new_to_old)
6630 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
6631 cff90b79 Iustin Pop
6632 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
6633 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
6634 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(new, self.target_node)
6635 a9e0c397 Iustin Pop
6636 cff90b79 Iustin Pop
      for disk in old_lvs:
6637 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
6638 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(disk, self.target_node)
6639 a9e0c397 Iustin Pop
6640 2bb5c911 Michael Hanselmann
      # Now that the new lvs have the old name, we can add them to the device
6641 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
6642 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
6643 4d4a651d Michael Hanselmann
                                                  new_lvs)
6644 4c4e4e1e Iustin Pop
      msg = result.fail_msg
6645 2cc1da8b Iustin Pop
      if msg:
6646 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
6647 4d4a651d Michael Hanselmann
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
6648 4d4a651d Michael Hanselmann
                                               new_lv).fail_msg
6649 4c4e4e1e Iustin Pop
          if msg2:
6650 2bb5c911 Michael Hanselmann
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
6651 2bb5c911 Michael Hanselmann
                               hint=("cleanup manually the unused logical"
6652 2bb5c911 Michael Hanselmann
                                     "volumes"))
6653 2cc1da8b Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
6654 a9e0c397 Iustin Pop
6655 a9e0c397 Iustin Pop
      dev.children = new_lvs
6656 a9e0c397 Iustin Pop
6657 2bb5c911 Michael Hanselmann
      self.cfg.Update(self.instance)
6658 a9e0c397 Iustin Pop
6659 2bb5c911 Michael Hanselmann
    # Wait for sync
6660 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
6661 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
6662 2bb5c911 Michael Hanselmann
    self.lu.LogStep(5, steps_total, "Sync devices")
6663 2bb5c911 Michael Hanselmann
    _WaitForSync(self.lu, self.instance, unlock=True)
6664 a9e0c397 Iustin Pop
6665 2bb5c911 Michael Hanselmann
    # Check all devices manually
6666 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
6667 a9e0c397 Iustin Pop
6668 cff90b79 Iustin Pop
    # Step: remove old storage
6669 2bb5c911 Michael Hanselmann
    self.lu.LogStep(6, steps_total, "Removing old storage")
6670 2bb5c911 Michael Hanselmann
    self._RemoveOldStorage(self.target_node, iv_names)
6671 a9e0c397 Iustin Pop
6672 2bb5c911 Michael Hanselmann
  def _ExecDrbd8Secondary(self):
6673 2bb5c911 Michael Hanselmann
    """Replace the secondary node for DRBD 8.
6674 a9e0c397 Iustin Pop

6675 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
6676 a9e0c397 Iustin Pop
      - for all disks of the instance:
6677 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
6678 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
6679 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
6680 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
6681 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
6682 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
6683 a9e0c397 Iustin Pop
          finds a device which is connected to the correct local disks but
6684 a9e0c397 Iustin Pop
          not network enabled
6685 a9e0c397 Iustin Pop
      - wait for sync across all devices
6686 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
6687 a9e0c397 Iustin Pop

6688 a9e0c397 Iustin Pop
    Failures are not very well handled.
6689 0834c866 Iustin Pop

6690 a9e0c397 Iustin Pop
    """
6691 0834c866 Iustin Pop
    steps_total = 6
6692 0834c866 Iustin Pop
6693 0834c866 Iustin Pop
    # Step: check device activation
6694 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
6695 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.instance.primary_node])
6696 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.instance.primary_node])
6697 0834c866 Iustin Pop
6698 0834c866 Iustin Pop
    # Step: check other node consistency
6699 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
6700 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
6701 0834c866 Iustin Pop
6702 0834c866 Iustin Pop
    # Step: create new storage
6703 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
6704 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
6705 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
6706 2bb5c911 Michael Hanselmann
                      (self.new_node, idx))
6707 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
6708 a9e0c397 Iustin Pop
      for new_lv in dev.children:
6709 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
6710 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
6711 a9e0c397 Iustin Pop
6712 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
6713 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
6714 a1578d63 Iustin Pop
    # error and the success paths
6715 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
6716 4d4a651d Michael Hanselmann
    minors = self.cfg.AllocateDRBDMinor([self.new_node
6717 4d4a651d Michael Hanselmann
                                         for dev in self.instance.disks],
6718 2bb5c911 Michael Hanselmann
                                        self.instance.name)
6719 2bb5c911 Michael Hanselmann
    logging.debug("Allocated minors %r" % (minors,))
6720 2bb5c911 Michael Hanselmann
6721 2bb5c911 Michael Hanselmann
    iv_names = {}
6722 2bb5c911 Michael Hanselmann
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
6723 4d4a651d Michael Hanselmann
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
6724 4d4a651d Michael Hanselmann
                      (self.new_node, idx))
6725 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
6726 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
6727 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
6728 a2d59d8b Iustin Pop
      # with network, for the later activation in step 4
6729 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
6730 2bb5c911 Michael Hanselmann
      if self.instance.primary_node == o_node1:
6731 a2d59d8b Iustin Pop
        p_minor = o_minor1
6732 ffa1c0dc Iustin Pop
      else:
6733 a2d59d8b Iustin Pop
        p_minor = o_minor2
6734 a2d59d8b Iustin Pop
6735 4d4a651d Michael Hanselmann
      new_alone_id = (self.instance.primary_node, self.new_node, None,
6736 4d4a651d Michael Hanselmann
                      p_minor, new_minor, o_secret)
6737 4d4a651d Michael Hanselmann
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
6738 4d4a651d Michael Hanselmann
                    p_minor, new_minor, o_secret)
6739 a2d59d8b Iustin Pop
6740 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
6741 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
6742 a2d59d8b Iustin Pop
                    new_net_id)
6743 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
6744 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
6745 8a6c7011 Iustin Pop
                              children=dev.children,
6746 8a6c7011 Iustin Pop
                              size=dev.size)
6747 796cab27 Iustin Pop
      try:
6748 2bb5c911 Michael Hanselmann
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
6749 2bb5c911 Michael Hanselmann
                              _GetInstanceInfoText(self.instance), False)
6750 82759cb1 Iustin Pop
      except errors.GenericError:
6751 2bb5c911 Michael Hanselmann
        self.cfg.ReleaseDRBDMinors(self.instance.name)
6752 796cab27 Iustin Pop
        raise
6753 a9e0c397 Iustin Pop
6754 2bb5c911 Michael Hanselmann
    # We have new devices, shutdown the drbd on the old secondary
6755 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
6756 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
6757 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.target_node)
6758 2bb5c911 Michael Hanselmann
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
6759 cacfd1fd Iustin Pop
      if msg:
6760 2bb5c911 Michael Hanselmann
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
6761 2bb5c911 Michael Hanselmann
                           "node: %s" % (idx, msg),
6762 2bb5c911 Michael Hanselmann
                           hint=("Please cleanup this device manually as"
6763 2bb5c911 Michael Hanselmann
                                 " soon as possible"))
6764 a9e0c397 Iustin Pop
6765 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
6766 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
6767 4d4a651d Michael Hanselmann
                                               self.node_secondary_ip,
6768 4d4a651d Michael Hanselmann
                                               self.instance.disks)\
6769 4d4a651d Michael Hanselmann
                                              [self.instance.primary_node]
6770 642445d9 Iustin Pop
6771 4c4e4e1e Iustin Pop
    msg = result.fail_msg
6772 a2d59d8b Iustin Pop
    if msg:
6773 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
6774 2bb5c911 Michael Hanselmann
      self.cfg.ReleaseDRBDMinors(self.instance.name)
6775 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
6776 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
6777 642445d9 Iustin Pop
6778 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
6779 642445d9 Iustin Pop
    # the instance to point to the new secondary
6780 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Updating instance configuration")
6781 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
6782 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
6783 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.instance.primary_node)
6784 2bb5c911 Michael Hanselmann
6785 2bb5c911 Michael Hanselmann
    self.cfg.Update(self.instance)
6786 a9e0c397 Iustin Pop
6787 642445d9 Iustin Pop
    # and now perform the drbd attach
6788 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Attaching primary drbds to new secondary"
6789 2bb5c911 Michael Hanselmann
                    " (standalone => connected)")
6790 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
6791 4d4a651d Michael Hanselmann
                                            self.new_node],
6792 4d4a651d Michael Hanselmann
                                           self.node_secondary_ip,
6793 4d4a651d Michael Hanselmann
                                           self.instance.disks,
6794 4d4a651d Michael Hanselmann
                                           self.instance.name,
6795 a2d59d8b Iustin Pop
                                           False)
6796 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
6797 4c4e4e1e Iustin Pop
      msg = to_result.fail_msg
6798 a2d59d8b Iustin Pop
      if msg:
6799 4d4a651d Michael Hanselmann
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
6800 4d4a651d Michael Hanselmann
                           to_node, msg,
6801 2bb5c911 Michael Hanselmann
                           hint=("please do a gnt-instance info to see the"
6802 2bb5c911 Michael Hanselmann
                                 " status of disks"))
6803 a9e0c397 Iustin Pop
6804 2bb5c911 Michael Hanselmann
    # Wait for sync
6805 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
6806 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
6807 2bb5c911 Michael Hanselmann
    self.lu.LogStep(5, steps_total, "Sync devices")
6808 2bb5c911 Michael Hanselmann
    _WaitForSync(self.lu, self.instance, unlock=True)
6809 a9e0c397 Iustin Pop
6810 2bb5c911 Michael Hanselmann
    # Check all devices manually
6811 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
6812 22985314 Guido Trotter
6813 2bb5c911 Michael Hanselmann
    # Step: remove old storage
6814 2bb5c911 Michael Hanselmann
    self.lu.LogStep(6, steps_total, "Removing old storage")
6815 2bb5c911 Michael Hanselmann
    self._RemoveOldStorage(self.target_node, iv_names)
6816 a9e0c397 Iustin Pop
6817 a8083063 Iustin Pop
6818 76aef8fc Michael Hanselmann
class LURepairNodeStorage(NoHooksLU):
6819 76aef8fc Michael Hanselmann
  """Repairs the volume group on a node.
6820 76aef8fc Michael Hanselmann

6821 76aef8fc Michael Hanselmann
  """
6822 76aef8fc Michael Hanselmann
  _OP_REQP = ["node_name"]
6823 76aef8fc Michael Hanselmann
  REQ_BGL = False
6824 76aef8fc Michael Hanselmann
6825 76aef8fc Michael Hanselmann
  def CheckArguments(self):
6826 76aef8fc Michael Hanselmann
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
6827 76aef8fc Michael Hanselmann
    if node_name is None:
6828 76aef8fc Michael Hanselmann
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
6829 76aef8fc Michael Hanselmann
6830 76aef8fc Michael Hanselmann
    self.op.node_name = node_name
6831 76aef8fc Michael Hanselmann
6832 76aef8fc Michael Hanselmann
  def ExpandNames(self):
6833 76aef8fc Michael Hanselmann
    self.needed_locks = {
6834 76aef8fc Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
6835 76aef8fc Michael Hanselmann
      }
6836 76aef8fc Michael Hanselmann
6837 76aef8fc Michael Hanselmann
  def _CheckFaultyDisks(self, instance, node_name):
6838 76aef8fc Michael Hanselmann
    if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
6839 76aef8fc Michael Hanselmann
                                node_name, True):
6840 76aef8fc Michael Hanselmann
      raise errors.OpPrereqError("Instance '%s' has faulty disks on"
6841 aa053071 Michael Hanselmann
                                 " node '%s'" % (instance.name, node_name))
6842 76aef8fc Michael Hanselmann
6843 76aef8fc Michael Hanselmann
  def CheckPrereq(self):
6844 76aef8fc Michael Hanselmann
    """Check prerequisites.
6845 76aef8fc Michael Hanselmann

6846 76aef8fc Michael Hanselmann
    """
6847 76aef8fc Michael Hanselmann
    storage_type = self.op.storage_type
6848 76aef8fc Michael Hanselmann
6849 76aef8fc Michael Hanselmann
    if (constants.SO_FIX_CONSISTENCY not in
6850 76aef8fc Michael Hanselmann
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
6851 76aef8fc Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
6852 76aef8fc Michael Hanselmann
                                 " repaired" % storage_type)
6853 76aef8fc Michael Hanselmann
6854 76aef8fc Michael Hanselmann
    # Check whether any instance on this node has faulty disks
6855 76aef8fc Michael Hanselmann
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
6856 76aef8fc Michael Hanselmann
      check_nodes = set(inst.all_nodes)
6857 76aef8fc Michael Hanselmann
      check_nodes.discard(self.op.node_name)
6858 76aef8fc Michael Hanselmann
      for inst_node_name in check_nodes:
6859 76aef8fc Michael Hanselmann
        self._CheckFaultyDisks(inst, inst_node_name)
6860 76aef8fc Michael Hanselmann
6861 76aef8fc Michael Hanselmann
  def Exec(self, feedback_fn):
6862 76aef8fc Michael Hanselmann
    feedback_fn("Repairing storage unit '%s' on %s ..." %
6863 76aef8fc Michael Hanselmann
                (self.op.name, self.op.node_name))
6864 76aef8fc Michael Hanselmann
6865 76aef8fc Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
6866 76aef8fc Michael Hanselmann
    result = self.rpc.call_storage_execute(self.op.node_name,
6867 76aef8fc Michael Hanselmann
                                           self.op.storage_type, st_args,
6868 76aef8fc Michael Hanselmann
                                           self.op.name,
6869 76aef8fc Michael Hanselmann
                                           constants.SO_FIX_CONSISTENCY)
6870 76aef8fc Michael Hanselmann
    result.Raise("Failed to repair storage unit '%s' on %s" %
6871 76aef8fc Michael Hanselmann
                 (self.op.name, self.op.node_name))
6872 76aef8fc Michael Hanselmann
6873 76aef8fc Michael Hanselmann
6874 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
6875 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
6876 8729e0d7 Iustin Pop

6877 8729e0d7 Iustin Pop
  """
6878 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
6879 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6880 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
6881 31e63dbf Guido Trotter
  REQ_BGL = False
6882 31e63dbf Guido Trotter
6883 31e63dbf Guido Trotter
  def ExpandNames(self):
6884 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
6885 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
6886 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6887 31e63dbf Guido Trotter
6888 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
6889 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
6890 31e63dbf Guido Trotter
      self._LockInstancesNodes()
6891 8729e0d7 Iustin Pop
6892 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
6893 8729e0d7 Iustin Pop
    """Build hooks env.
6894 8729e0d7 Iustin Pop

6895 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
6896 8729e0d7 Iustin Pop

6897 8729e0d7 Iustin Pop
    """
6898 8729e0d7 Iustin Pop
    env = {
6899 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
6900 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
6901 8729e0d7 Iustin Pop
      }
6902 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6903 8729e0d7 Iustin Pop
    nl = [
6904 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
6905 8729e0d7 Iustin Pop
      self.instance.primary_node,
6906 8729e0d7 Iustin Pop
      ]
6907 8729e0d7 Iustin Pop
    return env, nl, nl
6908 8729e0d7 Iustin Pop
6909 8729e0d7 Iustin Pop
  def CheckPrereq(self):
6910 8729e0d7 Iustin Pop
    """Check prerequisites.
6911 8729e0d7 Iustin Pop

6912 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
6913 8729e0d7 Iustin Pop

6914 8729e0d7 Iustin Pop
    """
6915 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6916 31e63dbf Guido Trotter
    assert instance is not None, \
6917 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6918 6b12959c Iustin Pop
    nodenames = list(instance.all_nodes)
6919 6b12959c Iustin Pop
    for node in nodenames:
6920 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, node)
6921 7527a8a4 Iustin Pop
6922 31e63dbf Guido Trotter
6923 8729e0d7 Iustin Pop
    self.instance = instance
6924 8729e0d7 Iustin Pop
6925 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
6926 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
6927 8729e0d7 Iustin Pop
                                 " growing.")
6928 8729e0d7 Iustin Pop
6929 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
6930 8729e0d7 Iustin Pop
6931 72737a7f Iustin Pop
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
6932 72737a7f Iustin Pop
                                       instance.hypervisor)
6933 8729e0d7 Iustin Pop
    for node in nodenames:
6934 781de953 Iustin Pop
      info = nodeinfo[node]
6935 4c4e4e1e Iustin Pop
      info.Raise("Cannot get current information from node %s" % node)
6936 070e998b Iustin Pop
      vg_free = info.payload.get('vg_free', None)
6937 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
6938 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
6939 8729e0d7 Iustin Pop
                                   " node %s" % node)
6940 781de953 Iustin Pop
      if self.op.amount > vg_free:
6941 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
6942 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
6943 781de953 Iustin Pop
                                   (node, vg_free, self.op.amount))
6944 8729e0d7 Iustin Pop
6945 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
6946 8729e0d7 Iustin Pop
    """Execute disk grow.
6947 8729e0d7 Iustin Pop

6948 8729e0d7 Iustin Pop
    """
6949 8729e0d7 Iustin Pop
    instance = self.instance
6950 ad24e046 Iustin Pop
    disk = self.disk
6951 6b12959c Iustin Pop
    for node in instance.all_nodes:
6952 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
6953 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
6954 4c4e4e1e Iustin Pop
      result.Raise("Grow request failed to node %s" % node)
6955 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
6956 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
6957 6605411d Iustin Pop
    if self.op.wait_for_sync:
6958 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
6959 6605411d Iustin Pop
      if disk_abort:
6960 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
6961 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
6962 8729e0d7 Iustin Pop
6963 8729e0d7 Iustin Pop
6964 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
6965 a8083063 Iustin Pop
  """Query runtime instance data.
6966 a8083063 Iustin Pop

6967 a8083063 Iustin Pop
  """
6968 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
6969 a987fa48 Guido Trotter
  REQ_BGL = False
6970 ae5849b5 Michael Hanselmann
6971 a987fa48 Guido Trotter
  def ExpandNames(self):
6972 a987fa48 Guido Trotter
    self.needed_locks = {}
6973 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
6974 a987fa48 Guido Trotter
6975 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
6976 a987fa48 Guido Trotter
      raise errors.OpPrereqError("Invalid argument type 'instances'")
6977 a987fa48 Guido Trotter
6978 a987fa48 Guido Trotter
    if self.op.instances:
6979 a987fa48 Guido Trotter
      self.wanted_names = []
6980 a987fa48 Guido Trotter
      for name in self.op.instances:
6981 a987fa48 Guido Trotter
        full_name = self.cfg.ExpandInstanceName(name)
6982 a987fa48 Guido Trotter
        if full_name is None:
6983 f57c76e4 Iustin Pop
          raise errors.OpPrereqError("Instance '%s' not known" % name)
6984 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
6985 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
6986 a987fa48 Guido Trotter
    else:
6987 a987fa48 Guido Trotter
      self.wanted_names = None
6988 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
6989 a987fa48 Guido Trotter
6990 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
6991 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6992 a987fa48 Guido Trotter
6993 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
6994 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
6995 a987fa48 Guido Trotter
      self._LockInstancesNodes()
6996 a8083063 Iustin Pop
6997 a8083063 Iustin Pop
  def CheckPrereq(self):
6998 a8083063 Iustin Pop
    """Check prerequisites.
6999 a8083063 Iustin Pop

7000 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
7001 a8083063 Iustin Pop

7002 a8083063 Iustin Pop
    """
7003 a987fa48 Guido Trotter
    if self.wanted_names is None:
7004 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
7005 a8083063 Iustin Pop
7006 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
7007 a987fa48 Guido Trotter
                             in self.wanted_names]
7008 a987fa48 Guido Trotter
    return
7009 a8083063 Iustin Pop
7010 98825740 Michael Hanselmann
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
7011 98825740 Michael Hanselmann
    """Returns the status of a block device
7012 98825740 Michael Hanselmann

7013 98825740 Michael Hanselmann
    """
7014 4dce1a83 Michael Hanselmann
    if self.op.static or not node:
7015 98825740 Michael Hanselmann
      return None
7016 98825740 Michael Hanselmann
7017 98825740 Michael Hanselmann
    self.cfg.SetDiskID(dev, node)
7018 98825740 Michael Hanselmann
7019 98825740 Michael Hanselmann
    result = self.rpc.call_blockdev_find(node, dev)
7020 98825740 Michael Hanselmann
    if result.offline:
7021 98825740 Michael Hanselmann
      return None
7022 98825740 Michael Hanselmann
7023 98825740 Michael Hanselmann
    result.Raise("Can't compute disk status for %s" % instance_name)
7024 98825740 Michael Hanselmann
7025 98825740 Michael Hanselmann
    status = result.payload
7026 ddfe2228 Michael Hanselmann
    if status is None:
7027 ddfe2228 Michael Hanselmann
      return None
7028 98825740 Michael Hanselmann
7029 98825740 Michael Hanselmann
    return (status.dev_path, status.major, status.minor,
7030 98825740 Michael Hanselmann
            status.sync_percent, status.estimated_time,
7031 f208978a Michael Hanselmann
            status.is_degraded, status.ldisk_status)
7032 98825740 Michael Hanselmann
7033 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
7034 a8083063 Iustin Pop
    """Compute block device status.
7035 a8083063 Iustin Pop

7036 a8083063 Iustin Pop
    """
7037 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
7038 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
7039 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
7040 a8083063 Iustin Pop
        snode = dev.logical_id[1]
7041 a8083063 Iustin Pop
      else:
7042 a8083063 Iustin Pop
        snode = dev.logical_id[0]
7043 a8083063 Iustin Pop
7044 98825740 Michael Hanselmann
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
7045 98825740 Michael Hanselmann
                                              instance.name, dev)
7046 98825740 Michael Hanselmann
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
7047 a8083063 Iustin Pop
7048 a8083063 Iustin Pop
    if dev.children:
7049 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
7050 a8083063 Iustin Pop
                      for child in dev.children]
7051 a8083063 Iustin Pop
    else:
7052 a8083063 Iustin Pop
      dev_children = []
7053 a8083063 Iustin Pop
7054 a8083063 Iustin Pop
    data = {
7055 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
7056 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
7057 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
7058 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
7059 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
7060 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
7061 a8083063 Iustin Pop
      "children": dev_children,
7062 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
7063 c98162a7 Iustin Pop
      "size": dev.size,
7064 a8083063 Iustin Pop
      }
7065 a8083063 Iustin Pop
7066 a8083063 Iustin Pop
    return data
7067 a8083063 Iustin Pop
7068 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7069 a8083063 Iustin Pop
    """Gather and return data"""
7070 a8083063 Iustin Pop
    result = {}
7071 338e51e8 Iustin Pop
7072 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
7073 338e51e8 Iustin Pop
7074 a8083063 Iustin Pop
    for instance in self.wanted_instances:
7075 57821cac Iustin Pop
      if not self.op.static:
7076 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
7077 57821cac Iustin Pop
                                                  instance.name,
7078 57821cac Iustin Pop
                                                  instance.hypervisor)
7079 4c4e4e1e Iustin Pop
        remote_info.Raise("Error checking node %s" % instance.primary_node)
7080 7ad1af4a Iustin Pop
        remote_info = remote_info.payload
7081 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
7082 57821cac Iustin Pop
          remote_state = "up"
7083 57821cac Iustin Pop
        else:
7084 57821cac Iustin Pop
          remote_state = "down"
7085 a8083063 Iustin Pop
      else:
7086 57821cac Iustin Pop
        remote_state = None
7087 0d68c45d Iustin Pop
      if instance.admin_up:
7088 a8083063 Iustin Pop
        config_state = "up"
7089 0d68c45d Iustin Pop
      else:
7090 0d68c45d Iustin Pop
        config_state = "down"
7091 a8083063 Iustin Pop
7092 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
7093 a8083063 Iustin Pop
               for device in instance.disks]
7094 a8083063 Iustin Pop
7095 a8083063 Iustin Pop
      idict = {
7096 a8083063 Iustin Pop
        "name": instance.name,
7097 a8083063 Iustin Pop
        "config_state": config_state,
7098 a8083063 Iustin Pop
        "run_state": remote_state,
7099 a8083063 Iustin Pop
        "pnode": instance.primary_node,
7100 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
7101 a8083063 Iustin Pop
        "os": instance.os,
7102 0b13832c Guido Trotter
        # this happens to be the same format used for hooks
7103 0b13832c Guido Trotter
        "nics": _NICListToTuple(self, instance.nics),
7104 a8083063 Iustin Pop
        "disks": disks,
7105 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
7106 24838135 Iustin Pop
        "network_port": instance.network_port,
7107 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
7108 338e51e8 Iustin Pop
        "hv_actual": cluster.FillHV(instance),
7109 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
7110 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
7111 90f72445 Iustin Pop
        "serial_no": instance.serial_no,
7112 90f72445 Iustin Pop
        "mtime": instance.mtime,
7113 90f72445 Iustin Pop
        "ctime": instance.ctime,
7114 033d58b0 Iustin Pop
        "uuid": instance.uuid,
7115 a8083063 Iustin Pop
        }
7116 a8083063 Iustin Pop
7117 a8083063 Iustin Pop
      result[instance.name] = idict
7118 a8083063 Iustin Pop
7119 a8083063 Iustin Pop
    return result
7120 a8083063 Iustin Pop
7121 a8083063 Iustin Pop
7122 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
7123 a8083063 Iustin Pop
  """Modifies an instances's parameters.
7124 a8083063 Iustin Pop

7125 a8083063 Iustin Pop
  """
7126 a8083063 Iustin Pop
  HPATH = "instance-modify"
7127 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7128 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
7129 1a5c7281 Guido Trotter
  REQ_BGL = False
7130 1a5c7281 Guido Trotter
7131 24991749 Iustin Pop
  def CheckArguments(self):
7132 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
7133 24991749 Iustin Pop
      self.op.nics = []
7134 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
7135 24991749 Iustin Pop
      self.op.disks = []
7136 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
7137 24991749 Iustin Pop
      self.op.beparams = {}
7138 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
7139 24991749 Iustin Pop
      self.op.hvparams = {}
7140 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
7141 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
7142 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
7143 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
7144 24991749 Iustin Pop
7145 24991749 Iustin Pop
    # Disk validation
7146 24991749 Iustin Pop
    disk_addremove = 0
7147 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
7148 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
7149 24991749 Iustin Pop
        disk_addremove += 1
7150 24991749 Iustin Pop
        continue
7151 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
7152 24991749 Iustin Pop
        disk_addremove += 1
7153 24991749 Iustin Pop
      else:
7154 24991749 Iustin Pop
        if not isinstance(disk_op, int):
7155 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
7156 8b46606c Guido Trotter
        if not isinstance(disk_dict, dict):
7157 8b46606c Guido Trotter
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
7158 8b46606c Guido Trotter
          raise errors.OpPrereqError(msg)
7159 8b46606c Guido Trotter
7160 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
7161 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
7162 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
7163 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
7164 24991749 Iustin Pop
        size = disk_dict.get('size', None)
7165 24991749 Iustin Pop
        if size is None:
7166 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
7167 24991749 Iustin Pop
        try:
7168 24991749 Iustin Pop
          size = int(size)
7169 24991749 Iustin Pop
        except ValueError, err:
7170 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
7171 24991749 Iustin Pop
                                     str(err))
7172 24991749 Iustin Pop
        disk_dict['size'] = size
7173 24991749 Iustin Pop
      else:
7174 24991749 Iustin Pop
        # modification of disk
7175 24991749 Iustin Pop
        if 'size' in disk_dict:
7176 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
7177 24991749 Iustin Pop
                                     " grow-disk")
7178 24991749 Iustin Pop
7179 24991749 Iustin Pop
    if disk_addremove > 1:
7180 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
7181 24991749 Iustin Pop
                                 " supported at a time")
7182 24991749 Iustin Pop
7183 24991749 Iustin Pop
    # NIC validation
7184 24991749 Iustin Pop
    nic_addremove = 0
7185 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
7186 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
7187 24991749 Iustin Pop
        nic_addremove += 1
7188 24991749 Iustin Pop
        continue
7189 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
7190 24991749 Iustin Pop
        nic_addremove += 1
7191 24991749 Iustin Pop
      else:
7192 24991749 Iustin Pop
        if not isinstance(nic_op, int):
7193 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
7194 8b46606c Guido Trotter
        if not isinstance(nic_dict, dict):
7195 8b46606c Guido Trotter
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
7196 8b46606c Guido Trotter
          raise errors.OpPrereqError(msg)
7197 24991749 Iustin Pop
7198 24991749 Iustin Pop
      # nic_dict should be a dict
7199 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
7200 24991749 Iustin Pop
      if nic_ip is not None:
7201 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
7202 24991749 Iustin Pop
          nic_dict['ip'] = None
7203 24991749 Iustin Pop
        else:
7204 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
7205 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
7206 5c44da6a Guido Trotter
7207 cd098c41 Guido Trotter
      nic_bridge = nic_dict.get('bridge', None)
7208 cd098c41 Guido Trotter
      nic_link = nic_dict.get('link', None)
7209 cd098c41 Guido Trotter
      if nic_bridge and nic_link:
7210 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
7211 29921401 Iustin Pop
                                   " at the same time")
7212 cd098c41 Guido Trotter
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
7213 cd098c41 Guido Trotter
        nic_dict['bridge'] = None
7214 cd098c41 Guido Trotter
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
7215 cd098c41 Guido Trotter
        nic_dict['link'] = None
7216 cd098c41 Guido Trotter
7217 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
7218 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
7219 5c44da6a Guido Trotter
        if nic_mac is None:
7220 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
7221 5c44da6a Guido Trotter
7222 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
7223 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
7224 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7225 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
7226 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
7227 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
7228 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
7229 5c44da6a Guido Trotter
                                     " modifying an existing nic")
7230 5c44da6a Guido Trotter
7231 24991749 Iustin Pop
    if nic_addremove > 1:
7232 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
7233 24991749 Iustin Pop
                                 " supported at a time")
7234 24991749 Iustin Pop
7235 1a5c7281 Guido Trotter
  def ExpandNames(self):
7236 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
7237 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
7238 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7239 74409b12 Iustin Pop
7240 74409b12 Iustin Pop
  def DeclareLocks(self, level):
7241 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
7242 74409b12 Iustin Pop
      self._LockInstancesNodes()
7243 a8083063 Iustin Pop
7244 a8083063 Iustin Pop
  def BuildHooksEnv(self):
7245 a8083063 Iustin Pop
    """Build hooks env.
7246 a8083063 Iustin Pop

7247 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
7248 a8083063 Iustin Pop

7249 a8083063 Iustin Pop
    """
7250 396e1b78 Michael Hanselmann
    args = dict()
7251 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
7252 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
7253 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
7254 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
7255 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
7256 d8dcf3c9 Guido Trotter
    # information at all.
7257 d8dcf3c9 Guido Trotter
    if self.op.nics:
7258 d8dcf3c9 Guido Trotter
      args['nics'] = []
7259 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
7260 62f0dd02 Guido Trotter
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
7261 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
7262 d8dcf3c9 Guido Trotter
        if idx in nic_override:
7263 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
7264 d8dcf3c9 Guido Trotter
        else:
7265 d8dcf3c9 Guido Trotter
          this_nic_override = {}
7266 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
7267 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
7268 d8dcf3c9 Guido Trotter
        else:
7269 d8dcf3c9 Guido Trotter
          ip = nic.ip
7270 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
7271 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
7272 d8dcf3c9 Guido Trotter
        else:
7273 d8dcf3c9 Guido Trotter
          mac = nic.mac
7274 62f0dd02 Guido Trotter
        if idx in self.nic_pnew:
7275 62f0dd02 Guido Trotter
          nicparams = self.nic_pnew[idx]
7276 62f0dd02 Guido Trotter
        else:
7277 62f0dd02 Guido Trotter
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
7278 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
7279 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
7280 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
7281 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
7282 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
7283 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
7284 62f0dd02 Guido Trotter
        nicparams = self.nic_pnew[constants.DDM_ADD]
7285 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
7286 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
7287 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
7288 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
7289 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
7290 d8dcf3c9 Guido Trotter
7291 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
7292 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7293 a8083063 Iustin Pop
    return env, nl, nl
7294 a8083063 Iustin Pop
7295 0329617a Guido Trotter
  def _GetUpdatedParams(self, old_params, update_dict,
7296 0329617a Guido Trotter
                        default_values, parameter_types):
7297 0329617a Guido Trotter
    """Return the new params dict for the given params.
7298 0329617a Guido Trotter

7299 0329617a Guido Trotter
    @type old_params: dict
7300 f2fd87d7 Iustin Pop
    @param old_params: old parameters
7301 0329617a Guido Trotter
    @type update_dict: dict
7302 f2fd87d7 Iustin Pop
    @param update_dict: dict containing new parameter values,
7303 f2fd87d7 Iustin Pop
                        or constants.VALUE_DEFAULT to reset the
7304 f2fd87d7 Iustin Pop
                        parameter to its default value
7305 0329617a Guido Trotter
    @type default_values: dict
7306 0329617a Guido Trotter
    @param default_values: default values for the filled parameters
7307 0329617a Guido Trotter
    @type parameter_types: dict
7308 0329617a Guido Trotter
    @param parameter_types: dict mapping target dict keys to types
7309 0329617a Guido Trotter
                            in constants.ENFORCEABLE_TYPES
7310 0329617a Guido Trotter
    @rtype: (dict, dict)
7311 0329617a Guido Trotter
    @return: (new_parameters, filled_parameters)
7312 0329617a Guido Trotter

7313 0329617a Guido Trotter
    """
7314 0329617a Guido Trotter
    params_copy = copy.deepcopy(old_params)
7315 0329617a Guido Trotter
    for key, val in update_dict.iteritems():
7316 0329617a Guido Trotter
      if val == constants.VALUE_DEFAULT:
7317 0329617a Guido Trotter
        try:
7318 0329617a Guido Trotter
          del params_copy[key]
7319 0329617a Guido Trotter
        except KeyError:
7320 0329617a Guido Trotter
          pass
7321 0329617a Guido Trotter
      else:
7322 0329617a Guido Trotter
        params_copy[key] = val
7323 0329617a Guido Trotter
    utils.ForceDictType(params_copy, parameter_types)
7324 0329617a Guido Trotter
    params_filled = objects.FillDict(default_values, params_copy)
7325 0329617a Guido Trotter
    return (params_copy, params_filled)
7326 0329617a Guido Trotter
7327 a8083063 Iustin Pop
  def CheckPrereq(self):
7328 a8083063 Iustin Pop
    """Check prerequisites.
7329 a8083063 Iustin Pop

7330 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
7331 a8083063 Iustin Pop

7332 a8083063 Iustin Pop
    """
7333 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
7334 a8083063 Iustin Pop
7335 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
7336 31a853d2 Iustin Pop
7337 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7338 2ee88aeb Guido Trotter
    cluster = self.cluster = self.cfg.GetClusterInfo()
7339 1a5c7281 Guido Trotter
    assert self.instance is not None, \
7340 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
7341 6b12959c Iustin Pop
    pnode = instance.primary_node
7342 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
7343 74409b12 Iustin Pop
7344 338e51e8 Iustin Pop
    # hvparams processing
7345 74409b12 Iustin Pop
    if self.op.hvparams:
7346 0329617a Guido Trotter
      i_hvdict, hv_new = self._GetUpdatedParams(
7347 0329617a Guido Trotter
                             instance.hvparams, self.op.hvparams,
7348 0329617a Guido Trotter
                             cluster.hvparams[instance.hypervisor],
7349 0329617a Guido Trotter
                             constants.HVS_PARAMETER_TYPES)
7350 74409b12 Iustin Pop
      # local check
7351 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
7352 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
7353 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
7354 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
7355 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
7356 338e51e8 Iustin Pop
    else:
7357 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
7358 338e51e8 Iustin Pop
7359 338e51e8 Iustin Pop
    # beparams processing
7360 338e51e8 Iustin Pop
    if self.op.beparams:
7361 0329617a Guido Trotter
      i_bedict, be_new = self._GetUpdatedParams(
7362 0329617a Guido Trotter
                             instance.beparams, self.op.beparams,
7363 0329617a Guido Trotter
                             cluster.beparams[constants.PP_DEFAULT],
7364 0329617a Guido Trotter
                             constants.BES_PARAMETER_TYPES)
7365 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
7366 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
7367 338e51e8 Iustin Pop
    else:
7368 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
7369 74409b12 Iustin Pop
7370 cfefe007 Guido Trotter
    self.warn = []
7371 647a5d80 Iustin Pop
7372 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
7373 647a5d80 Iustin Pop
      mem_check_list = [pnode]
7374 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
7375 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
7376 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
7377 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
7378 72737a7f Iustin Pop
                                                  instance.hypervisor)
7379 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
7380 72737a7f Iustin Pop
                                         instance.hypervisor)
7381 070e998b Iustin Pop
      pninfo = nodeinfo[pnode]
7382 4c4e4e1e Iustin Pop
      msg = pninfo.fail_msg
7383 070e998b Iustin Pop
      if msg:
7384 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
7385 070e998b Iustin Pop
        self.warn.append("Can't get info from primary node %s: %s" %
7386 070e998b Iustin Pop
                         (pnode, msg))
7387 070e998b Iustin Pop
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
7388 070e998b Iustin Pop
        self.warn.append("Node data from primary node %s doesn't contain"
7389 070e998b Iustin Pop
                         " free memory information" % pnode)
7390 4c4e4e1e Iustin Pop
      elif instance_info.fail_msg:
7391 7ad1af4a Iustin Pop
        self.warn.append("Can't get instance runtime information: %s" %
7392 4c4e4e1e Iustin Pop
                        instance_info.fail_msg)
7393 cfefe007 Guido Trotter
      else:
7394 7ad1af4a Iustin Pop
        if instance_info.payload:
7395 7ad1af4a Iustin Pop
          current_mem = int(instance_info.payload['memory'])
7396 cfefe007 Guido Trotter
        else:
7397 cfefe007 Guido Trotter
          # Assume instance not running
7398 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
7399 cfefe007 Guido Trotter
          # and we have no other way to check)
7400 cfefe007 Guido Trotter
          current_mem = 0
7401 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
7402 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
7403 cfefe007 Guido Trotter
        if miss_mem > 0:
7404 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
7405 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
7406 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
7407 cfefe007 Guido Trotter
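      # Worked example of the check above, with illustrative numbers: asking
      # for BE_MEMORY = 2048 while the instance currently uses 512 and the
      # primary node reports 1024 MB free gives
      # miss_mem = 2048 - 512 - 1024 = 512 > 0, so the change is refused
      # (unless the force flag is set, in which case this whole memory check
      # is skipped).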
7408 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
7409 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
7410 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
7411 ea33068f Iustin Pop
            continue
7412 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
7413 070e998b Iustin Pop
          if msg:
7414 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
7415 070e998b Iustin Pop
                             (node, msg))
7416 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
7417 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
7418 070e998b Iustin Pop
                             " memory information" % node)
7419 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
7420 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
7421 647a5d80 Iustin Pop
                             " secondary node %s" % node)
7422 5bc84f33 Alexander Schreiber
7423 24991749 Iustin Pop
    # NIC processing
7424 cd098c41 Guido Trotter
    self.nic_pnew = {}
7425 cd098c41 Guido Trotter
    self.nic_pinst = {}
7426 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
7427 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
7428 24991749 Iustin Pop
        if not instance.nics:
7429 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
7430 24991749 Iustin Pop
        continue
7431 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
7432 24991749 Iustin Pop
        # an existing nic
7433 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
7434 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
7435 24991749 Iustin Pop
                                     " are 0 to %d" %
7436 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
7437 cd098c41 Guido Trotter
        old_nic_params = instance.nics[nic_op].nicparams
7438 cd098c41 Guido Trotter
        old_nic_ip = instance.nics[nic_op].ip
7439 cd098c41 Guido Trotter
      else:
7440 cd098c41 Guido Trotter
        old_nic_params = {}
7441 cd098c41 Guido Trotter
        old_nic_ip = None
7442 cd098c41 Guido Trotter
7443 cd098c41 Guido Trotter
      update_params_dict = dict([(key, nic_dict[key])
7444 cd098c41 Guido Trotter
                                 for key in constants.NICS_PARAMETERS
7445 cd098c41 Guido Trotter
                                 if key in nic_dict])
7446 cd098c41 Guido Trotter
7447 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
7448 cd098c41 Guido Trotter
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
7449 cd098c41 Guido Trotter
7450 cd098c41 Guido Trotter
      new_nic_params, new_filled_nic_params = \
7451 cd098c41 Guido Trotter
          self._GetUpdatedParams(old_nic_params, update_params_dict,
7452 cd098c41 Guido Trotter
                                 cluster.nicparams[constants.PP_DEFAULT],
7453 cd098c41 Guido Trotter
                                 constants.NICS_PARAMETER_TYPES)
7454 cd098c41 Guido Trotter
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
7455 cd098c41 Guido Trotter
      self.nic_pinst[nic_op] = new_nic_params
7456 cd098c41 Guido Trotter
      self.nic_pnew[nic_op] = new_filled_nic_params
7457 cd098c41 Guido Trotter
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
7458 cd098c41 Guido Trotter
7459 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
7460 cd098c41 Guido Trotter
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
7461 4c4e4e1e Iustin Pop
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
7462 35c0c8da Iustin Pop
        if msg:
7463 35c0c8da Iustin Pop
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
7464 24991749 Iustin Pop
          if self.force:
7465 24991749 Iustin Pop
            self.warn.append(msg)
7466 24991749 Iustin Pop
          else:
7467 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
7468 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_ROUTED:
7469 cd098c41 Guido Trotter
        if 'ip' in nic_dict:
7470 cd098c41 Guido Trotter
          nic_ip = nic_dict['ip']
7471 cd098c41 Guido Trotter
        else:
7472 cd098c41 Guido Trotter
          nic_ip = old_nic_ip
7473 cd098c41 Guido Trotter
        if nic_ip is None:
7474 cd098c41 Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic ip to None'
7475 cd098c41 Guido Trotter
                                     ' on a routed nic')
7476 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
7477 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
7478 5c44da6a Guido Trotter
        if nic_mac is None:
7479 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic mac to None')
7480 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7481 5c44da6a Guido Trotter
          # otherwise generate the mac
7482 5c44da6a Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC()
7483 5c44da6a Guido Trotter
        else:
7484 5c44da6a Guido Trotter
          # or validate/reserve the current one
7485 5c44da6a Guido Trotter
          if self.cfg.IsMacInUse(nic_mac):
7486 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
7487 5c44da6a Guido Trotter
                                       " in cluster" % nic_mac)
7488 24991749 Iustin Pop
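    # Illustrative shape of the self.op.nics list handled above (all values
    # made up): a list of (index | DDM_ADD | DDM_REMOVE, dict) pairs such as
    #   [(constants.DDM_ADD, {'mac': constants.VALUE_AUTO, 'link': 'br0'}),
    #    (0, {'ip': '192.0.2.10', 'mode': constants.NIC_MODE_ROUTED})]
    # A legacy 'bridge' key, if given, is folded into NIC_LINK before the
    # filled parameters are validated.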
7489 24991749 Iustin Pop
    # DISK processing
7490 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
7491 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
7492 24991749 Iustin Pop
                                 " diskless instances")
7493 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
7494 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
7495 24991749 Iustin Pop
        if len(instance.disks) == 1:
7496 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
7497 24991749 Iustin Pop
                                     " an instance")
7498 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
7499 24991749 Iustin Pop
        ins_l = ins_l[pnode]
7500 4c4e4e1e Iustin Pop
        msg = ins_l.fail_msg
7501 aca13712 Iustin Pop
        if msg:
7502 aca13712 Iustin Pop
          raise errors.OpPrereqError("Can't contact node %s: %s" %
7503 aca13712 Iustin Pop
                                     (pnode, msg))
7504 aca13712 Iustin Pop
        if instance.name in ins_l.payload:
7505 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
7506 24991749 Iustin Pop
                                     " disks.")
7507 24991749 Iustin Pop
7508 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
7509 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
7510 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
7511 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
7512 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
7513 24991749 Iustin Pop
        # an existing disk
7514 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
7515 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
7516 24991749 Iustin Pop
                                     " are 0 to %d" %
7517 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
7518 24991749 Iustin Pop
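    # Illustrative shape of the self.op.disks list checked above (values are
    # made up): (index | DDM_ADD | DDM_REMOVE, dict) pairs, e.g.
    #   [(constants.DDM_ADD, {'size': 1024, 'mode': constants.DISK_RDWR}),
    #    (0, {'mode': constants.DISK_RDONLY})]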
7519 a8083063 Iustin Pop
    return
7520 a8083063 Iustin Pop
7521 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7522 a8083063 Iustin Pop
    """Modifies an instance.
7523 a8083063 Iustin Pop

7524 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
7525 24991749 Iustin Pop

7526 a8083063 Iustin Pop
    """
7527 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
7528 cfefe007 Guido Trotter
    # feedback_fn there.
7529 cfefe007 Guido Trotter
    for warn in self.warn:
7530 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
7531 cfefe007 Guido Trotter
7532 a8083063 Iustin Pop
    result = []
7533 a8083063 Iustin Pop
    instance = self.instance
7534 cd098c41 Guido Trotter
    cluster = self.cluster
7535 24991749 Iustin Pop
    # disk changes
7536 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
7537 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
7538 24991749 Iustin Pop
        # remove the last disk
7539 24991749 Iustin Pop
        device = instance.disks.pop()
7540 24991749 Iustin Pop
        device_idx = len(instance.disks)
7541 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
7542 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
7543 4c4e4e1e Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
7544 e1bc0878 Iustin Pop
          if msg:
7545 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
7546 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
7547 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
7548 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
7549 24991749 Iustin Pop
        # add a new disk
7550 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
7551 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
7552 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
7553 24991749 Iustin Pop
        else:
7554 24991749 Iustin Pop
          file_driver = file_path = None
7555 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
7556 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
7557 24991749 Iustin Pop
                                         instance.disk_template,
7558 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
7559 24991749 Iustin Pop
                                         instance.secondary_nodes,
7560 24991749 Iustin Pop
                                         [disk_dict],
7561 24991749 Iustin Pop
                                         file_path,
7562 24991749 Iustin Pop
                                         file_driver,
7563 24991749 Iustin Pop
                                         disk_idx_base)[0]
7564 24991749 Iustin Pop
        instance.disks.append(new_disk)
7565 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
7566 24991749 Iustin Pop
7567 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
7568 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
7569 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
7570 24991749 Iustin Pop
        #HARDCODE
7571 428958aa Iustin Pop
        for node in instance.all_nodes:
7572 428958aa Iustin Pop
          f_create = node == instance.primary_node
7573 796cab27 Iustin Pop
          try:
7574 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
7575 428958aa Iustin Pop
                            f_create, info, f_create)
7576 1492cca7 Iustin Pop
          except errors.OpExecError, err:
7577 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
7578 428958aa Iustin Pop
                            " node %s: %s",
7579 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
7580 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
7581 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
7582 24991749 Iustin Pop
      else:
7583 24991749 Iustin Pop
        # change a given disk
7584 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
7585 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
7586 24991749 Iustin Pop
    # NIC changes
7587 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
7588 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
7589 24991749 Iustin Pop
        # remove the last nic
7590 24991749 Iustin Pop
        del instance.nics[-1]
7591 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
7592 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
7593 5c44da6a Guido Trotter
        # mac and bridge should be set, by now
7594 5c44da6a Guido Trotter
        mac = nic_dict['mac']
7595 cd098c41 Guido Trotter
        ip = nic_dict.get('ip', None)
7596 cd098c41 Guido Trotter
        nicparams = self.nic_pinst[constants.DDM_ADD]
7597 cd098c41 Guido Trotter
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
7598 24991749 Iustin Pop
        instance.nics.append(new_nic)
7599 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
7600 cd098c41 Guido Trotter
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
7601 cd098c41 Guido Trotter
                       (new_nic.mac, new_nic.ip,
7602 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
7603 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
7604 cd098c41 Guido Trotter
                       )))
7605 24991749 Iustin Pop
      else:
7606 cd098c41 Guido Trotter
        for key in 'mac', 'ip':
7607 24991749 Iustin Pop
          if key in nic_dict:
7608 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
7609 cd098c41 Guido Trotter
        if nic_op in self.nic_pnew:
7610 cd098c41 Guido Trotter
          instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
7611 cd098c41 Guido Trotter
        for key, val in nic_dict.iteritems():
7612 cd098c41 Guido Trotter
          result.append(("nic.%s/%d" % (key, nic_op), val))
7613 24991749 Iustin Pop
7614 24991749 Iustin Pop
    # hvparams changes
7615 74409b12 Iustin Pop
    if self.op.hvparams:
7616 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
7617 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
7618 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
7619 24991749 Iustin Pop
7620 24991749 Iustin Pop
    # beparams changes
7621 338e51e8 Iustin Pop
    if self.op.beparams:
7622 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
7623 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
7624 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
7625 a8083063 Iustin Pop
7626 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
7627 a8083063 Iustin Pop
7628 a8083063 Iustin Pop
    return result
7629 a8083063 Iustin Pop
7630 a8083063 Iustin Pop
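# The Exec method above returns a list of (parameter, new value) pairs
# describing the changes that were applied, for example (made-up values):
#   [("disk/1", "add:size=1024,mode=rw"),
#    ("nic.ip/0", "192.0.2.10"),
#    ("be/memory", 1024)]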
7631 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
7632 a8083063 Iustin Pop
  """Query the exports list
7633 a8083063 Iustin Pop

7634 a8083063 Iustin Pop
  """
7635 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
7636 21a15682 Guido Trotter
  REQ_BGL = False
7637 21a15682 Guido Trotter
7638 21a15682 Guido Trotter
  def ExpandNames(self):
7639 21a15682 Guido Trotter
    self.needed_locks = {}
7640 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
7641 21a15682 Guido Trotter
    if not self.op.nodes:
7642 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7643 21a15682 Guido Trotter
    else:
7644 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
7645 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
7646 a8083063 Iustin Pop
7647 a8083063 Iustin Pop
  def CheckPrereq(self):
7648 21a15682 Guido Trotter
    """Check prerequisites.
7649 a8083063 Iustin Pop

7650 a8083063 Iustin Pop
    """
7651 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
7652 a8083063 Iustin Pop
7653 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7654 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
7655 a8083063 Iustin Pop

7656 e4376078 Iustin Pop
    @rtype: dict
7657 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
7658 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
7659 e4376078 Iustin Pop
        that node.
7660 a8083063 Iustin Pop

7661 a8083063 Iustin Pop
    """
7662 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
7663 b04285f2 Guido Trotter
    result = {}
7664 b04285f2 Guido Trotter
    for node in rpcresult:
7665 4c4e4e1e Iustin Pop
      if rpcresult[node].fail_msg:
7666 b04285f2 Guido Trotter
        result[node] = False
7667 b04285f2 Guido Trotter
      else:
7668 1b7bfbb7 Iustin Pop
        result[node] = rpcresult[node].payload
7669 b04285f2 Guido Trotter
7670 b04285f2 Guido Trotter
    return result
7671 a8083063 Iustin Pop
7672 a8083063 Iustin Pop
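# Illustrative return value of the Exec method above (names made up):
#   {"node1.example.com": ["instance1.example.com", "instance2.example.com"],
#    "node2.example.com": False}
# where False marks a node whose export list could not be retrieved.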
7673 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
7674 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
7675 a8083063 Iustin Pop

7676 a8083063 Iustin Pop
  """
7677 a8083063 Iustin Pop
  HPATH = "instance-export"
7678 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7679 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
7680 6657590e Guido Trotter
  REQ_BGL = False
7681 6657590e Guido Trotter
7682 6657590e Guido Trotter
  def ExpandNames(self):
7683 6657590e Guido Trotter
    self._ExpandAndLockInstance()
7684 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
7685 6657590e Guido Trotter
    #
7686 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
7687 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
7688 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
7689 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
7690 6657590e Guido Trotter
    #    then one to remove, after
7691 5bbd3f7f Michael Hanselmann
    #  - removing the removal operation altogether
7692 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7693 6657590e Guido Trotter
7694 6657590e Guido Trotter
  def DeclareLocks(self, level):
7695 6657590e Guido Trotter
    """Last minute lock declaration."""
7696 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
7697 a8083063 Iustin Pop
7698 a8083063 Iustin Pop
  def BuildHooksEnv(self):
7699 a8083063 Iustin Pop
    """Build hooks env.
7700 a8083063 Iustin Pop

7701 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
7702 a8083063 Iustin Pop

7703 a8083063 Iustin Pop
    """
7704 a8083063 Iustin Pop
    env = {
7705 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
7706 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
7707 a8083063 Iustin Pop
      }
7708 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7709 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
7710 a8083063 Iustin Pop
          self.op.target_node]
7711 a8083063 Iustin Pop
    return env, nl, nl
7712 a8083063 Iustin Pop
7713 a8083063 Iustin Pop
  def CheckPrereq(self):
7714 a8083063 Iustin Pop
    """Check prerequisites.
7715 a8083063 Iustin Pop

7716 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
7717 a8083063 Iustin Pop

7718 a8083063 Iustin Pop
    """
7719 6657590e Guido Trotter
    instance_name = self.op.instance_name
7720 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
7721 6657590e Guido Trotter
    assert self.instance is not None, \
7722 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
7723 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
7724 a8083063 Iustin Pop
7725 6657590e Guido Trotter
    self.dst_node = self.cfg.GetNodeInfo(
7726 6657590e Guido Trotter
      self.cfg.ExpandNodeName(self.op.target_node))
7727 a8083063 Iustin Pop
7728 268b8e42 Iustin Pop
    if self.dst_node is None:
7729 268b8e42 Iustin Pop
      # This is wrong node name, not a non-locked node
7730 268b8e42 Iustin Pop
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
7731 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
7732 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
7733 a8083063 Iustin Pop
7734 b6023d6c Manuel Franceschini
    # instance disk type verification
7735 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
7736 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
7737 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
7738 b6023d6c Manuel Franceschini
                                   " file-based disks")
7739 b6023d6c Manuel Franceschini
7740 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7741 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
7742 a8083063 Iustin Pop

7743 a8083063 Iustin Pop
    """
7744 a8083063 Iustin Pop
    instance = self.instance
7745 a8083063 Iustin Pop
    dst_node = self.dst_node
7746 a8083063 Iustin Pop
    src_node = instance.primary_node
7747 37972df0 Michael Hanselmann
7748 a8083063 Iustin Pop
    if self.op.shutdown:
7749 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
7750 37972df0 Michael Hanselmann
      feedback_fn("Shutting down instance %s" % instance.name)
7751 781de953 Iustin Pop
      result = self.rpc.call_instance_shutdown(src_node, instance)
7752 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance %s on"
7753 4c4e4e1e Iustin Pop
                   " node %s" % (instance.name, src_node))
7754 a8083063 Iustin Pop
7755 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
7756 a8083063 Iustin Pop
7757 a8083063 Iustin Pop
    snap_disks = []
7758 a8083063 Iustin Pop
7759 998c712c Iustin Pop
    # set the disks ID correctly since call_instance_start needs the
7760 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
7761 998c712c Iustin Pop
    for disk in instance.disks:
7762 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
7763 998c712c Iustin Pop
7764 084f05a5 Iustin Pop
    # per-disk results
7765 084f05a5 Iustin Pop
    dresults = []
7766 a8083063 Iustin Pop
    try:
7767 a97da6b7 Iustin Pop
      for idx, disk in enumerate(instance.disks):
7768 37972df0 Michael Hanselmann
        feedback_fn("Creating a snapshot of disk/%s on node %s" %
7769 37972df0 Michael Hanselmann
                    (idx, src_node))
7770 37972df0 Michael Hanselmann
7771 87812fd3 Iustin Pop
        # result.payload will be a snapshot of an lvm leaf of the one we passed
7772 87812fd3 Iustin Pop
        result = self.rpc.call_blockdev_snapshot(src_node, disk)
7773 4c4e4e1e Iustin Pop
        msg = result.fail_msg
7774 87812fd3 Iustin Pop
        if msg:
7775 af0413bb Guido Trotter
          self.LogWarning("Could not snapshot disk/%s on node %s: %s",
7776 af0413bb Guido Trotter
                          idx, src_node, msg)
7777 19d7f90a Guido Trotter
          snap_disks.append(False)
7778 19d7f90a Guido Trotter
        else:
7779 87812fd3 Iustin Pop
          disk_id = (vgname, result.payload)
7780 19d7f90a Guido Trotter
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
7781 87812fd3 Iustin Pop
                                 logical_id=disk_id, physical_id=disk_id,
7782 19d7f90a Guido Trotter
                                 iv_name=disk.iv_name)
7783 19d7f90a Guido Trotter
          snap_disks.append(new_dev)
7784 a8083063 Iustin Pop
7785 a8083063 Iustin Pop
    finally:
7786 0d68c45d Iustin Pop
      if self.op.shutdown and instance.admin_up:
7787 37972df0 Michael Hanselmann
        feedback_fn("Starting instance %s" % instance.name)
7788 0eca8e0c Iustin Pop
        result = self.rpc.call_instance_start(src_node, instance, None, None)
7789 4c4e4e1e Iustin Pop
        msg = result.fail_msg
7790 dd279568 Iustin Pop
        if msg:
7791 b9bddb6b Iustin Pop
          _ShutdownInstanceDisks(self, instance)
7792 dd279568 Iustin Pop
          raise errors.OpExecError("Could not start instance: %s" % msg)
7793 a8083063 Iustin Pop
7794 a8083063 Iustin Pop
    # TODO: check for size
7795 a8083063 Iustin Pop
7796 62c9ec92 Iustin Pop
    cluster_name = self.cfg.GetClusterName()
7797 74c47259 Iustin Pop
    for idx, dev in enumerate(snap_disks):
7798 37972df0 Michael Hanselmann
      feedback_fn("Exporting snapshot %s from %s to %s" %
7799 37972df0 Michael Hanselmann
                  (idx, src_node, dst_node.name))
7800 19d7f90a Guido Trotter
      if dev:
7801 781de953 Iustin Pop
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
7802 781de953 Iustin Pop
                                               instance, cluster_name, idx)
7803 4c4e4e1e Iustin Pop
        msg = result.fail_msg
7804 ba55d062 Iustin Pop
        if msg:
7805 af0413bb Guido Trotter
          self.LogWarning("Could not export disk/%s from node %s to"
7806 af0413bb Guido Trotter
                          " node %s: %s", idx, src_node, dst_node.name, msg)
7807 084f05a5 Iustin Pop
          dresults.append(False)
7808 084f05a5 Iustin Pop
        else:
7809 084f05a5 Iustin Pop
          dresults.append(True)
7810 4c4e4e1e Iustin Pop
        msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
7811 e1bc0878 Iustin Pop
        if msg:
7812 a97da6b7 Iustin Pop
          self.LogWarning("Could not remove snapshot for disk/%d from node"
7813 a97da6b7 Iustin Pop
                          " %s: %s", idx, src_node, msg)
7814 084f05a5 Iustin Pop
      else:
7815 084f05a5 Iustin Pop
        dresults.append(False)
7816 a8083063 Iustin Pop
7817 37972df0 Michael Hanselmann
    feedback_fn("Finalizing export on %s" % dst_node.name)
7818 781de953 Iustin Pop
    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
7819 084f05a5 Iustin Pop
    fin_resu = True
7820 4c4e4e1e Iustin Pop
    msg = result.fail_msg
7821 9b201a0d Iustin Pop
    if msg:
7822 9b201a0d Iustin Pop
      self.LogWarning("Could not finalize export for instance %s"
7823 9b201a0d Iustin Pop
                      " on node %s: %s", instance.name, dst_node.name, msg)
7824 084f05a5 Iustin Pop
      fin_resu = False
7825 a8083063 Iustin Pop
7826 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
7827 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
7828 a8083063 Iustin Pop
7829 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
7830 a8083063 Iustin Pop
    # if we proceeded, the backup would be removed because OpQueryExports
7831 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
7832 35fbcd11 Iustin Pop
    iname = instance.name
7833 a8083063 Iustin Pop
    if nodelist:
7834 37972df0 Michael Hanselmann
      feedback_fn("Removing old exports for instance %s" % iname)
7835 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
7836 a8083063 Iustin Pop
      for node in exportlist:
7837 4c4e4e1e Iustin Pop
        if exportlist[node].fail_msg:
7838 781de953 Iustin Pop
          continue
7839 35fbcd11 Iustin Pop
        if iname in exportlist[node].payload:
7840 4c4e4e1e Iustin Pop
          msg = self.rpc.call_export_remove(node, iname).fail_msg
7841 35fbcd11 Iustin Pop
          if msg:
7842 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
7843 35fbcd11 Iustin Pop
                            " on node %s: %s", iname, node, msg)
7844 084f05a5 Iustin Pop
    return fin_resu, dresults
7845 5c947f38 Iustin Pop
7846 5c947f38 Iustin Pop
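# The Exec method above returns a pair (finalize status, per-disk statuses),
# e.g. (True, [True, False]) when the export was finalized on the target
# node but the second disk failed to transfer.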
7847 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
7848 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
7849 9ac99fda Guido Trotter

7850 9ac99fda Guido Trotter
  """
7851 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
7852 3656b3af Guido Trotter
  REQ_BGL = False
7853 3656b3af Guido Trotter
7854 3656b3af Guido Trotter
  def ExpandNames(self):
7855 3656b3af Guido Trotter
    self.needed_locks = {}
7856 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
7857 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
7858 3656b3af Guido Trotter
    # we can also remove exports for an already-removed instance)
7859 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7860 9ac99fda Guido Trotter
7861 9ac99fda Guido Trotter
  def CheckPrereq(self):
7862 9ac99fda Guido Trotter
    """Check prerequisites.
7863 9ac99fda Guido Trotter
    """
7864 9ac99fda Guido Trotter
    pass
7865 9ac99fda Guido Trotter
7866 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
7867 9ac99fda Guido Trotter
    """Remove any export.
7868 9ac99fda Guido Trotter

7869 9ac99fda Guido Trotter
    """
7870 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
7871 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
7872 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
7873 9ac99fda Guido Trotter
    fqdn_warn = False
7874 9ac99fda Guido Trotter
    if not instance_name:
7875 9ac99fda Guido Trotter
      fqdn_warn = True
7876 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
7877 9ac99fda Guido Trotter
7878 1b7bfbb7 Iustin Pop
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
7879 1b7bfbb7 Iustin Pop
    exportlist = self.rpc.call_export_list(locked_nodes)
7880 9ac99fda Guido Trotter
    found = False
7881 9ac99fda Guido Trotter
    for node in exportlist:
7882 4c4e4e1e Iustin Pop
      msg = exportlist[node].fail_msg
7883 1b7bfbb7 Iustin Pop
      if msg:
7884 1b7bfbb7 Iustin Pop
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
7885 781de953 Iustin Pop
        continue
7886 1b7bfbb7 Iustin Pop
      if instance_name in exportlist[node].payload:
7887 9ac99fda Guido Trotter
        found = True
7888 781de953 Iustin Pop
        result = self.rpc.call_export_remove(node, instance_name)
7889 4c4e4e1e Iustin Pop
        msg = result.fail_msg
7890 35fbcd11 Iustin Pop
        if msg:
7891 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
7892 35fbcd11 Iustin Pop
                        " on node %s: %s", instance_name, node, msg)
7893 9ac99fda Guido Trotter
7894 9ac99fda Guido Trotter
    if fqdn_warn and not found:
7895 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
7896 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
7897 9ac99fda Guido Trotter
                  " Domain Name.")
7898 9ac99fda Guido Trotter
7899 9ac99fda Guido Trotter
7900 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
7901 5c947f38 Iustin Pop
  """Generic tags LU.
7902 5c947f38 Iustin Pop

7903 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
7904 5c947f38 Iustin Pop

7905 5c947f38 Iustin Pop
  """
7906 5c947f38 Iustin Pop
7907 8646adce Guido Trotter
  def ExpandNames(self):
7908 8646adce Guido Trotter
    self.needed_locks = {}
7909 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
7910 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
7911 5c947f38 Iustin Pop
      if name is None:
7912 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
7913 3ecf6786 Iustin Pop
                                   (self.op.name,))
7914 5c947f38 Iustin Pop
      self.op.name = name
7915 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = name
7916 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
7917 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
7918 5c947f38 Iustin Pop
      if name is None:
7919 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
7920 3ecf6786 Iustin Pop
                                   (self.op.name,))
7921 5c947f38 Iustin Pop
      self.op.name = name
7922 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = name
7923 8646adce Guido Trotter
7924 8646adce Guido Trotter
  def CheckPrereq(self):
7925 8646adce Guido Trotter
    """Check prerequisites.
7926 8646adce Guido Trotter

7927 8646adce Guido Trotter
    """
7928 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
7929 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
7930 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
7931 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
7932 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
7933 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
7934 5c947f38 Iustin Pop
    else:
7935 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
7936 3ecf6786 Iustin Pop
                                 str(self.op.kind))
7937 5c947f38 Iustin Pop
7938 5c947f38 Iustin Pop
7939 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
7940 5c947f38 Iustin Pop
  """Returns the tags of a given object.
7941 5c947f38 Iustin Pop

7942 5c947f38 Iustin Pop
  """
7943 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
7944 8646adce Guido Trotter
  REQ_BGL = False
7945 5c947f38 Iustin Pop
7946 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
7947 5c947f38 Iustin Pop
    """Returns the tag list.
7948 5c947f38 Iustin Pop

7949 5c947f38 Iustin Pop
    """
7950 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
7951 5c947f38 Iustin Pop
7952 5c947f38 Iustin Pop
7953 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
7954 73415719 Iustin Pop
  """Searches the tags for a given pattern.
7955 73415719 Iustin Pop

7956 73415719 Iustin Pop
  """
7957 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
7958 8646adce Guido Trotter
  REQ_BGL = False
7959 8646adce Guido Trotter
7960 8646adce Guido Trotter
  def ExpandNames(self):
7961 8646adce Guido Trotter
    self.needed_locks = {}
7962 73415719 Iustin Pop
7963 73415719 Iustin Pop
  def CheckPrereq(self):
7964 73415719 Iustin Pop
    """Check prerequisites.
7965 73415719 Iustin Pop

7966 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
7967 73415719 Iustin Pop

7968 73415719 Iustin Pop
    """
7969 73415719 Iustin Pop
    try:
7970 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
7971 73415719 Iustin Pop
    except re.error, err:
7972 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
7973 73415719 Iustin Pop
                                 (self.op.pattern, err))
7974 73415719 Iustin Pop
7975 73415719 Iustin Pop
  def Exec(self, feedback_fn):
7976 73415719 Iustin Pop
    """Returns the tag list.
7977 73415719 Iustin Pop

7978 73415719 Iustin Pop
    """
7979 73415719 Iustin Pop
    cfg = self.cfg
7980 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
7981 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
7982 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
7983 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
7984 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
7985 73415719 Iustin Pop
    results = []
7986 73415719 Iustin Pop
    for path, target in tgts:
7987 73415719 Iustin Pop
      for tag in target.GetTags():
7988 73415719 Iustin Pop
        if self.re.search(tag):
7989 73415719 Iustin Pop
          results.append((path, tag))
7990 73415719 Iustin Pop
    return results
7991 73415719 Iustin Pop
7992 73415719 Iustin Pop
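# Illustrative return value of the Exec method above (tags are made up):
#   [("/cluster", "production"),
#    ("/instances/instance1.example.com", "production")]
# i.e. (path, tag) pairs for every tag that matches the pattern.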
7993 f27302fa Iustin Pop
class LUAddTags(TagsLU):
7994 5c947f38 Iustin Pop
  """Sets a tag on a given object.
7995 5c947f38 Iustin Pop

7996 5c947f38 Iustin Pop
  """
7997 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
7998 8646adce Guido Trotter
  REQ_BGL = False
7999 5c947f38 Iustin Pop
8000 5c947f38 Iustin Pop
  def CheckPrereq(self):
8001 5c947f38 Iustin Pop
    """Check prerequisites.
8002 5c947f38 Iustin Pop

8003 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
8004 5c947f38 Iustin Pop

8005 5c947f38 Iustin Pop
    """
8006 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
8007 f27302fa Iustin Pop
    for tag in self.op.tags:
8008 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
8009 5c947f38 Iustin Pop
8010 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8011 5c947f38 Iustin Pop
    """Sets the tag.
8012 5c947f38 Iustin Pop

8013 5c947f38 Iustin Pop
    """
8014 5c947f38 Iustin Pop
    try:
8015 f27302fa Iustin Pop
      for tag in self.op.tags:
8016 f27302fa Iustin Pop
        self.target.AddTag(tag)
8017 5c947f38 Iustin Pop
    except errors.TagError, err:
8018 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
8019 5c947f38 Iustin Pop
    try:
8020 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
8021 5c947f38 Iustin Pop
    except errors.ConfigurationError:
8022 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
8023 3ecf6786 Iustin Pop
                                " config file and the operation has been"
8024 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
8025 5c947f38 Iustin Pop
8026 5c947f38 Iustin Pop
8027 f27302fa Iustin Pop
class LUDelTags(TagsLU):
8028 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
8029 5c947f38 Iustin Pop

8030 5c947f38 Iustin Pop
  """
8031 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
8032 8646adce Guido Trotter
  REQ_BGL = False
8033 5c947f38 Iustin Pop
8034 5c947f38 Iustin Pop
  def CheckPrereq(self):
8035 5c947f38 Iustin Pop
    """Check prerequisites.
8036 5c947f38 Iustin Pop

8037 5c947f38 Iustin Pop
    This checks that we have the given tag.
8038 5c947f38 Iustin Pop

8039 5c947f38 Iustin Pop
    """
8040 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
8041 f27302fa Iustin Pop
    for tag in self.op.tags:
8042 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
8043 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
8044 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
8045 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
8046 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
8047 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
8048 f27302fa Iustin Pop
      diff_names.sort()
8049 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
8050 f27302fa Iustin Pop
                                 (",".join(diff_names)))
8051 5c947f38 Iustin Pop
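  # The subset check above uses plain set semantics, e.g. (illustrative):
  #   frozenset(["web"]) <= set(["web", "prod"])  evaluates to True
  #   frozenset(["db"]) <= set(["web", "prod"])   evaluates to False,
  # and only the second case triggers the OpPrereqError.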
8052 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8053 5c947f38 Iustin Pop
    """Remove the tag from the object.
8054 5c947f38 Iustin Pop

8055 5c947f38 Iustin Pop
    """
8056 f27302fa Iustin Pop
    for tag in self.op.tags:
8057 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
8058 5c947f38 Iustin Pop
    try:
8059 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
8060 5c947f38 Iustin Pop
    except errors.ConfigurationError:
8061 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
8062 3ecf6786 Iustin Pop
                                " config file and the operation has been"
8063 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
8064 06009e27 Iustin Pop
8065 0eed6e61 Guido Trotter
8066 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
8067 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
8068 06009e27 Iustin Pop

8069 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
8070 06009e27 Iustin Pop
  time.
8071 06009e27 Iustin Pop

8072 06009e27 Iustin Pop
  """
8073 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
8074 fbe9022f Guido Trotter
  REQ_BGL = False
8075 06009e27 Iustin Pop
8076 fbe9022f Guido Trotter
  def ExpandNames(self):
8077 fbe9022f Guido Trotter
    """Expand names and set required locks.
8078 06009e27 Iustin Pop

8079 fbe9022f Guido Trotter
    This expands the node list, if any.
8080 06009e27 Iustin Pop

8081 06009e27 Iustin Pop
    """
8082 fbe9022f Guido Trotter
    self.needed_locks = {}
8083 06009e27 Iustin Pop
    if self.op.on_nodes:
8084 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
8085 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
8086 fbe9022f Guido Trotter
      # more information.
8087 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
8088 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
8089 fbe9022f Guido Trotter
8090 fbe9022f Guido Trotter
  def CheckPrereq(self):
8091 fbe9022f Guido Trotter
    """Check prerequisites.
8092 fbe9022f Guido Trotter

8093 fbe9022f Guido Trotter
    """
8094 06009e27 Iustin Pop
8095 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
8096 06009e27 Iustin Pop
    """Do the actual sleep.
8097 06009e27 Iustin Pop

8098 06009e27 Iustin Pop
    """
8099 06009e27 Iustin Pop
    if self.op.on_master:
8100 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
8101 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
8102 06009e27 Iustin Pop
    if self.op.on_nodes:
8103 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
8104 06009e27 Iustin Pop
      for node, node_result in result.items():
8105 4c4e4e1e Iustin Pop
        node_result.Raise("Failure during rpc call to node %s" % node)
8106 d61df03e Iustin Pop
8107 d61df03e Iustin Pop
8108 d1c2dd75 Iustin Pop
class IAllocator(object):
8109 d1c2dd75 Iustin Pop
  """IAllocator framework.
8110 d61df03e Iustin Pop

8111 d1c2dd75 Iustin Pop
  An IAllocator instance has four sets of attributes:
8112 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
8113 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
8114 d1c2dd75 Iustin Pop
    - four buffer attributes (in_text, in_data, out_text, out_data): the
8115 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
8116 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
8117 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
8118 d1c2dd75 Iustin Pop
      easy usage
8119 d61df03e Iustin Pop

8120 d61df03e Iustin Pop
  """
8121 29859cb7 Iustin Pop
  _ALLO_KEYS = [
8122 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
8123 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
8124 d1c2dd75 Iustin Pop
    ]
8125 29859cb7 Iustin Pop
  _RELO_KEYS = [
8126 29859cb7 Iustin Pop
    "relocate_from",
8127 29859cb7 Iustin Pop
    ]
8128 d1c2dd75 Iustin Pop
8129 923ddac0 Michael Hanselmann
  def __init__(self, cfg, rpc, mode, name, **kwargs):
8130 923ddac0 Michael Hanselmann
    self.cfg = cfg
8131 923ddac0 Michael Hanselmann
    self.rpc = rpc
8132 d1c2dd75 Iustin Pop
    # init buffer variables
8133 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
8134 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
8135 29859cb7 Iustin Pop
    self.mode = mode
8136 29859cb7 Iustin Pop
    self.name = name
8137 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
8138 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
8139 a0add446 Iustin Pop
    self.hypervisor = None
8140 29859cb7 Iustin Pop
    self.relocate_from = None
8141 27579978 Iustin Pop
    # computed fields
8142 27579978 Iustin Pop
    self.required_nodes = None
8143 d1c2dd75 Iustin Pop
    # init result fields
8144 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
8145 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
8146 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
8147 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
8148 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
8149 29859cb7 Iustin Pop
    else:
8150 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
8151 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
8152 d1c2dd75 Iustin Pop
    for key in kwargs:
8153 29859cb7 Iustin Pop
      if key not in keyset:
8154 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
8155 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
8156 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
8157 29859cb7 Iustin Pop
    for key in keyset:
8158 d1c2dd75 Iustin Pop
      if key not in kwargs:
8159 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
8160 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
8161 d1c2dd75 Iustin Pop
    self._BuildInputData()
8162 d1c2dd75 Iustin Pop
8163 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
8164 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
8165 d1c2dd75 Iustin Pop

8166 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
8167 d1c2dd75 Iustin Pop

8168 d1c2dd75 Iustin Pop
    """
8169 923ddac0 Michael Hanselmann
    cfg = self.cfg
8170 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
8171 d1c2dd75 Iustin Pop
    # cluster data
8172 d1c2dd75 Iustin Pop
    data = {
8173 77031881 Iustin Pop
      "version": constants.IALLOCATOR_VERSION,
8174 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
8175 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
8176 1325da74 Iustin Pop
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
8177 d1c2dd75 Iustin Pop
      # we don't have job IDs
8178 d61df03e Iustin Pop
      }
8179 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
8180 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
8181 6286519f Iustin Pop
8182 d1c2dd75 Iustin Pop
    # node data
8183 d1c2dd75 Iustin Pop
    node_results = {}
8184 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
8185 8cc7e742 Guido Trotter
8186 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
8187 a0add446 Iustin Pop
      hypervisor_name = self.hypervisor
8188 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
8189 a0add446 Iustin Pop
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
8190 8cc7e742 Guido Trotter
8191 923ddac0 Michael Hanselmann
    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
8192 923ddac0 Michael Hanselmann
                                        hypervisor_name)
8193 923ddac0 Michael Hanselmann
    node_iinfo = \
8194 923ddac0 Michael Hanselmann
      self.rpc.call_all_instances_info(node_list,
8195 923ddac0 Michael Hanselmann
                                       cluster_info.enabled_hypervisors)
8196 1325da74 Iustin Pop
    for nname, nresult in node_data.items():
8197 1325da74 Iustin Pop
      # first fill in static (config-based) values
8198 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
8199 d1c2dd75 Iustin Pop
      pnr = {
8200 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
8201 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
8202 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
8203 fc0fe88c Iustin Pop
        "offline": ninfo.offline,
8204 0b2454b9 Iustin Pop
        "drained": ninfo.drained,
8205 1325da74 Iustin Pop
        "master_candidate": ninfo.master_candidate,
8206 d1c2dd75 Iustin Pop
        }
8207 1325da74 Iustin Pop
8208 0d853843 Iustin Pop
      if not (ninfo.offline or ninfo.drained):
8209 4c4e4e1e Iustin Pop
        nresult.Raise("Can't get data for node %s" % nname)
8210 4c4e4e1e Iustin Pop
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
8211 4c4e4e1e Iustin Pop
                                nname)
8212 070e998b Iustin Pop
        remote_info = nresult.payload
8213 b142ef15 Iustin Pop
8214 1325da74 Iustin Pop
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
8215 1325da74 Iustin Pop
                     'vg_size', 'vg_free', 'cpu_total']:
8216 1325da74 Iustin Pop
          if attr not in remote_info:
8217 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' didn't return attribute"
8218 1325da74 Iustin Pop
                                     " '%s'" % (nname, attr))
8219 070e998b Iustin Pop
          if not isinstance(remote_info[attr], int):
8220 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' returned invalid value"
8221 070e998b Iustin Pop
                                     " for '%s': %s" %
8222 070e998b Iustin Pop
                                     (nname, attr, remote_info[attr]))
8223 1325da74 Iustin Pop
        # compute memory used by primary instances
8224 1325da74 Iustin Pop
        i_p_mem = i_p_up_mem = 0
8225 1325da74 Iustin Pop
        for iinfo, beinfo in i_list:
8226 1325da74 Iustin Pop
          if iinfo.primary_node == nname:
8227 1325da74 Iustin Pop
            i_p_mem += beinfo[constants.BE_MEMORY]
8228 2fa74ef4 Iustin Pop
            if iinfo.name not in node_iinfo[nname].payload:
8229 1325da74 Iustin Pop
              i_used_mem = 0
8230 1325da74 Iustin Pop
            else:
8231 2fa74ef4 Iustin Pop
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
8232 1325da74 Iustin Pop
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
8233 1325da74 Iustin Pop
            remote_info['memory_free'] -= max(0, i_mem_diff)
8234 1325da74 Iustin Pop
8235 1325da74 Iustin Pop
            if iinfo.admin_up:
8236 1325da74 Iustin Pop
              i_p_up_mem += beinfo[constants.BE_MEMORY]
8237 1325da74 Iustin Pop
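        # Worked example of the adjustment above (illustrative numbers): a
        # primary instance with BE_MEMORY = 1024 that is currently using
        # only 512 gives i_mem_diff = 512, so another 512 is subtracted from
        # the node's reported memory_free to account for the instance's full
        # memory commitment.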
8238 1325da74 Iustin Pop
        # compute memory used by instances
8239 1325da74 Iustin Pop
        pnr_dyn = {
8240 1325da74 Iustin Pop
          "total_memory": remote_info['memory_total'],
8241 1325da74 Iustin Pop
          "reserved_memory": remote_info['memory_dom0'],
8242 1325da74 Iustin Pop
          "free_memory": remote_info['memory_free'],
8243 1325da74 Iustin Pop
          "total_disk": remote_info['vg_size'],
8244 1325da74 Iustin Pop
          "free_disk": remote_info['vg_free'],
8245 1325da74 Iustin Pop
          "total_cpus": remote_info['cpu_total'],
8246 1325da74 Iustin Pop
          "i_pri_memory": i_p_mem,
8247 1325da74 Iustin Pop
          "i_pri_up_memory": i_p_up_mem,
8248 1325da74 Iustin Pop
          }
8249 1325da74 Iustin Pop
        pnr.update(pnr_dyn)
8250 1325da74 Iustin Pop
8251 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
8252 d1c2dd75 Iustin Pop
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = objects.FillDict(
            cluster_info.nicparams[constants.PP_DEFAULT],
            nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data
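    # Illustrative shape of a single instance_data entry (values invented,
    # not taken from a real cluster; "bridge" is only present for bridged
    # NICs, mirroring the code above):
    #   "web1.example.com": {
    #     "tags": [], "admin_up": True, "vcpus": 1, "memory": 512,
    #     "os": "debootstrap",
    #     "nodes": ["node1.example.com", "node2.example.com"],
    #     "nics": [{"mac": "aa:00:00:00:00:01", "ip": None,
    #               "mode": "bridged", "link": "xen-br0", "bridge": "xen-br0"}],
    #     "disks": [{"size": 10240, "mode": "w"}],
    #     "disk_template": "drbd", "hypervisor": "xen-pvm",
    #     "disk_space_total": 10368,
    #     }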

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request
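    # A minimal sketch of the resulting "allocate" request (all values are
    # illustrative only; the real ones come from the opcode that created
    # this IAllocator instance):
    #   {
    #     "type": "allocate", "name": "web1.example.com",
    #     "disk_template": "drbd", "tags": [], "os": "debootstrap",
    #     "vcpus": 1, "memory": 512,
    #     "disks": [{"size": 10240, "mode": "w"}],
    #     "disk_space_total": 10368,
    #     "nics": [{"mac": "auto", "ip": None, "bridge": "xen-br0"}],
    #     "required_nodes": 2,
    #     }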
8322 298fe380 Iustin Pop
8323 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
8324 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
8325 298fe380 Iustin Pop

8326 d1c2dd75 Iustin Pop
    This in combination with _IAllocatorGetClusterData will create the
8327 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
8328 d61df03e Iustin Pop

8329 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
8330 d1c2dd75 Iustin Pop
    done.
8331 d61df03e Iustin Pop

8332 d1c2dd75 Iustin Pop
    """
8333 923ddac0 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(self.name)
8334 27579978 Iustin Pop
    if instance is None:
8335 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
8336 27579978 Iustin Pop
                                   " IAllocator" % self.name)
8337 27579978 Iustin Pop
8338 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
8339 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
8340 27579978 Iustin Pop
8341 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
8342 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
8343 2a139bb0 Iustin Pop
8344 27579978 Iustin Pop
    self.required_nodes = 1
8345 dafc7302 Guido Trotter
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
8346 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
8347 27579978 Iustin Pop
8348 d1c2dd75 Iustin Pop
    request = {
8349 2a139bb0 Iustin Pop
      "type": "relocate",
8350 d1c2dd75 Iustin Pop
      "name": self.name,
8351 27579978 Iustin Pop
      "disk_space_total": disk_space,
8352 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
8353 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
8354 d1c2dd75 Iustin Pop
      }
8355 27579978 Iustin Pop
    self.in_data["request"] = request
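    # A minimal sketch of the resulting "relocate" request (values are
    # illustrative): only mirrored instances get here, so exactly one new
    # node is requested and the current secondaries are listed as the
    # nodes to relocate away from:
    #   {
    #     "type": "relocate", "name": "web1.example.com",
    #     "disk_space_total": 10368, "required_nodes": 1,
    #     "relocate_from": ["node2.example.com"],
    #     }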

  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)
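    # self.in_text now holds the serialized input that is handed verbatim
    # to the iallocator script; it contains the "nodes" and "instances"
    # sections built by _ComputeClusterData plus the "request" added by
    # _AddNewInstance or _AddRelocateInstance.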

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
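    # A well-formed reply might look like the following (node names are
    # illustrative); "success", "info" and "nodes" also become attributes
    # of this IAllocator instance via the setattr() call above:
    #   {
    #     "success": True,
    #     "info": "allocation successful",
    #     "nodes": ["node1.example.com", "node2.example.com"],
    #     }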


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the test direction and mode.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)
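    # For reference, a set of opcode parameters that satisfies the
    # allocation-mode checks above could look like (purely illustrative):
    #   nics = [{"mac": "aa:00:00:00:00:01", "ip": None, "bridge": "xen-br0"}]
    #   disks = [{"size": 1024, "mode": "w"}]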

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result