root / lib / cmdlib.py @ 692738fc

#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import time
import re
import platform
import logging
import copy

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo
    self.LogStep = processor.LogStep
    # support for dry-run
    self.dry_run_result = None

    # Tasklets
    self.tasklets = []

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check, ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer need to worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets:
      for tl in self.tasklets:
        tl.CheckPrereq()
    else:
      raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets:
      for tl in self.tasklets:
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    An empty set of nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


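# Example (illustrative sketch only): a minimal LU following the rules above --
# declare locks in ExpandNames, validate in CheckPrereq, act in Exec. The
# opcode attribute "node_name" is a hypothetical parameter used purely for
# illustration and is not tied to any specific opcode.
#
#   class LUExampleNoop(NoHooksLU):
#     _OP_REQP = ["node_name"]
#     REQ_BGL = False
#
#     def ExpandNames(self):
#       # canonicalize the name and declare the single lock we need
#       self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name)
#       self.needed_locks = {locking.LEVEL_NODE: [self.op.node_name]}
#
#     def CheckPrereq(self):
#       _CheckNodeOnline(self, self.op.node_name)
#
#     def Exec(self, feedback_fn):
#       feedback_fn("no-op executed on %s" % self.op.node_name)

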
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


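# Example (illustrative sketch only): a minimal tasklet and how an LU would
# register it from ExpandNames; the constructor signature and the instance
# handling shown here are hypothetical.
#
#   class TaskletExampleCheck(Tasklet):
#     def __init__(self, lu, instance_name):
#       self.lu = lu
#       self.instance_name = instance_name
#
#     def CheckPrereq(self):
#       if self.lu.cfg.GetInstanceInfo(self.instance_name) is None:
#         raise errors.OpPrereqError("Instance '%s' not known" %
#                                    self.instance_name)
#
#     def Exec(self, feedback_fn):
#       feedback_fn("checked %s" % self.instance_name)
#
#   # inside an LU's ExpandNames:
#   #   self.tasklets = [TaskletExampleCheck(self, self.op.instance_name)]

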
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


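# Example (illustrative sketch only): a query-style LU would typically call
# this helper from CheckPrereq with a non-empty opcode field, e.g.:
#
#   self.wanted_nodes = _GetWantedNodes(self, self.op.nodes)

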
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))


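# Example (illustrative sketch only): how a query-type LU typically uses
# _CheckOutputFields when validating its opcode; the field names below are
# hypothetical, not a fixed list from this module.
#
#   _CheckOutputFields(static=utils.FieldSet("name", "pinst_cnt"),
#                      dynamic=utils.FieldSet("dtotal", "dfree"),
#                      selected=self.op.output_fields)

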
def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env

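# Example (illustrative sketch only): for an instance with one bridged NIC and
# one disk, the dict returned above would contain entries along these lines
# (all values here are made up for illustration):
#
#   {
#     "OP_TARGET": "inst1.example.com",
#     "INSTANCE_NAME": "inst1.example.com",
#     "INSTANCE_PRIMARY": "node1.example.com",
#     "INSTANCE_SECONDARIES": "node2.example.com",
#     "INSTANCE_STATUS": "up",
#     "INSTANCE_NIC_COUNT": 1,
#     "INSTANCE_NIC0_MODE": constants.NIC_MODE_BRIDGED,
#     "INSTANCE_NIC0_BRIDGE": "xen-br0",
#     "INSTANCE_DISK_COUNT": 1,
#     "INSTANCE_DISK0_SIZE": 10240,
#     "INSTANCE_DISK0_MODE": "rw",
#     "INSTANCE_BE_memory": 512,         # one entry per backend parameter
#     "INSTANCE_HV_kernel_path": "...",  # one entry per hypervisor parameter
#   }

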
def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics

def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _CheckNicsBridgesExist(lu, target_nics, target_node,
                               profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
                for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  instances = []

  for (_, inst) in cfg.GetAllInstancesInfo().iteritems():
    if node_name in inst.secondary_nodes:
      instances.append(inst)

  return instances


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map, vg_name):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G
    if vg_name is not None:
      vglist = node_result.get(constants.NV_VGLIST, None)
      if not vglist:
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                        (node,))
        bad = True
      else:
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
          bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates (and the file is outdated)" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    if vg_name is not None:
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
      if not isinstance(used_minors, (tuple, list)):
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
                    str(used_minors))
      else:
        for minor, (iname, must_exist) in drbd_map.items():
          if minor not in used_minors and must_exist:
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
                        " not active" % (minor, iname))
            bad = True
        for minor in used_minors:
          if minor not in drbd_map:
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
                        minor)
            bad = True

    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.admin_up:
      if ((node_current not in node_instance or
          not instance in node_instance[node_current]) and
          node_current not in n_offline):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
960 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
961 a8083063 Iustin Pop

962 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
963 a8083063 Iustin Pop
    reported as unknown.
964 a8083063 Iustin Pop

965 a8083063 Iustin Pop
    """
966 a8083063 Iustin Pop
    bad = False
967 a8083063 Iustin Pop
968 a8083063 Iustin Pop
    for node in node_vol_is:
969 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
970 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
971 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
972 a8083063 Iustin Pop
                      (volume, node))
973 a8083063 Iustin Pop
          bad = True
974 a8083063 Iustin Pop
    return bad
975 a8083063 Iustin Pop
976 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
977 a8083063 Iustin Pop
    """Verify the list of running instances.
978 a8083063 Iustin Pop

979 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
980 a8083063 Iustin Pop

981 a8083063 Iustin Pop
    """
982 a8083063 Iustin Pop
    bad = False
983 a8083063 Iustin Pop
    for node in node_instance:
984 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
985 a8083063 Iustin Pop
        if runninginstance not in instancelist:
986 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
987 a8083063 Iustin Pop
                          (runninginstance, node))
988 a8083063 Iustin Pop
          bad = True
989 a8083063 Iustin Pop
    return bad
990 a8083063 Iustin Pop
991 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
992 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
993 2b3b6ddd Guido Trotter

994 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
995 2b3b6ddd Guido Trotter
    was primary for.
996 2b3b6ddd Guido Trotter

997 2b3b6ddd Guido Trotter
    """
998 2b3b6ddd Guido Trotter
    bad = False
999 2b3b6ddd Guido Trotter
1000 2b3b6ddd Guido Trotter
    for node, nodeinfo in node_info.iteritems():
1001 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
1002 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to host, should a single
1003 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
1004 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
1005 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
1006 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
1007 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
1008 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
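      # Illustrative sketch (added for clarity, hypothetical names/values): if
      # this node is secondary for inst1 (BE_MEMORY 512) and inst2 (BE_MEMORY
      # 1024), both auto-balanced and with node1 as their primary, then
      # needed_mem for the node1 entry below is 1536, and an error is reported
      # if this node's 'mfree' is smaller than that.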
1009 2b3b6ddd Guido Trotter
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
1010 2b3b6ddd Guido Trotter
        needed_mem = 0
1011 2b3b6ddd Guido Trotter
        for instance in instances:
1012 338e51e8 Iustin Pop
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1013 c0f2b229 Iustin Pop
          if bep[constants.BE_AUTO_BALANCE]:
1014 3924700f Iustin Pop
            needed_mem += bep[constants.BE_MEMORY]
1015 2b3b6ddd Guido Trotter
        if nodeinfo['mfree'] < needed_mem:
1016 5bbd3f7f Michael Hanselmann
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
1017 2b3b6ddd Guido Trotter
                      " failovers should node %s fail" % (node, prinode))
1018 2b3b6ddd Guido Trotter
          bad = True
1019 2b3b6ddd Guido Trotter
    return bad
1020 2b3b6ddd Guido Trotter
1021 a8083063 Iustin Pop
  def CheckPrereq(self):
1022 a8083063 Iustin Pop
    """Check prerequisites.
1023 a8083063 Iustin Pop

1024 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
1025 e54c4c5e Guido Trotter
    all its members are valid.
1026 a8083063 Iustin Pop

1027 a8083063 Iustin Pop
    """
1028 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
1029 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
1030 e54c4c5e Guido Trotter
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
1031 a8083063 Iustin Pop
1032 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
1033 d8fff41c Guido Trotter
    """Build hooks env.
1034 d8fff41c Guido Trotter

1035 5bbd3f7f Michael Hanselmann
    Cluster-Verify hooks are run only in the post phase, and their failure makes
1036 d8fff41c Guido Trotter
    their output be logged in the verify output and the verification fail.
1037 d8fff41c Guido Trotter

1038 d8fff41c Guido Trotter
    """
1039 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
1040 35e994e9 Iustin Pop
    env = {
1041 35e994e9 Iustin Pop
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
1042 35e994e9 Iustin Pop
      }
1043 35e994e9 Iustin Pop
    for node in self.cfg.GetAllNodesInfo().values():
1044 35e994e9 Iustin Pop
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
1045 35e994e9 Iustin Pop
1046 d8fff41c Guido Trotter
    return env, [], all_nodes
1047 d8fff41c Guido Trotter
1048 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1049 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
1050 a8083063 Iustin Pop

1051 a8083063 Iustin Pop
    """
1052 a8083063 Iustin Pop
    bad = False
1053 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
1054 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
1055 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
1056 a8083063 Iustin Pop
1057 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
1058 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
1059 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
1060 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
1061 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
1062 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
1063 6d2e83d5 Iustin Pop
                        for iname in instancelist)
1064 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
1065 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
1066 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
1067 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
1068 a8083063 Iustin Pop
    node_volume = {}
1069 a8083063 Iustin Pop
    node_instance = {}
1070 9c9c7d30 Guido Trotter
    node_info = {}
1071 26b6af5e Guido Trotter
    instance_cfg = {}
1072 a8083063 Iustin Pop
1073 a8083063 Iustin Pop
    # FIXME: verify OS list
1074 a8083063 Iustin Pop
    # do local checksums
1075 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
1076 112f18a5 Iustin Pop
1077 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
1078 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
1079 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
1080 112f18a5 Iustin Pop
    file_names.extend(master_files)
1081 112f18a5 Iustin Pop
1082 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
1083 a8083063 Iustin Pop
1084 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1085 a8083063 Iustin Pop
    node_verify_param = {
1086 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
1087 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
1088 82e37788 Iustin Pop
                              if not node.offline],
1089 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1090 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1091 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1092 82e37788 Iustin Pop
                                 if not node.offline],
1093 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1094 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1095 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1096 a8083063 Iustin Pop
      }
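    # The NV_* keys above name the checks each node will run via
    # call_node_verify below; the LVM and DRBD related checks are only
    # requested when a volume group is configured (see the next block).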
1097 cc9e1230 Guido Trotter
    if vg_name is not None:
1098 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1099 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1100 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
1101 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1102 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1103 a8083063 Iustin Pop
1104 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1105 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1106 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1107 6d2e83d5 Iustin Pop
1108 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1109 112f18a5 Iustin Pop
      node = node_i.name
1110 25361b9a Iustin Pop
1111 0a66c968 Iustin Pop
      if node_i.offline:
1112 0a66c968 Iustin Pop
        feedback_fn("* Skipping offline node %s" % (node,))
1113 0a66c968 Iustin Pop
        n_offline.append(node)
1114 0a66c968 Iustin Pop
        continue
1115 0a66c968 Iustin Pop
1116 112f18a5 Iustin Pop
      if node == master_node:
1117 25361b9a Iustin Pop
        ntype = "master"
1118 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1119 25361b9a Iustin Pop
        ntype = "master candidate"
1120 22f0f71d Iustin Pop
      elif node_i.drained:
1121 22f0f71d Iustin Pop
        ntype = "drained"
1122 22f0f71d Iustin Pop
        n_drained.append(node)
1123 112f18a5 Iustin Pop
      else:
1124 25361b9a Iustin Pop
        ntype = "regular"
1125 112f18a5 Iustin Pop
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1126 25361b9a Iustin Pop
1127 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1128 6f68a739 Iustin Pop
      if msg:
1129 6f68a739 Iustin Pop
        feedback_fn("  - ERROR: while contacting node %s: %s" % (node, msg))
1130 25361b9a Iustin Pop
        bad = True
1131 25361b9a Iustin Pop
        continue
1132 25361b9a Iustin Pop
1133 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1134 6d2e83d5 Iustin Pop
      node_drbd = {}
1135 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1136 c614e5fb Iustin Pop
        if instance not in instanceinfo:
1137 c614e5fb Iustin Pop
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
1138 c614e5fb Iustin Pop
                      instance)
1139 c614e5fb Iustin Pop
          # ghost instance should not be running, but otherwise we
1140 c614e5fb Iustin Pop
          # don't give double warnings (both ghost instance and
1141 c614e5fb Iustin Pop
          # unallocated minor in use)
1142 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1143 c614e5fb Iustin Pop
        else:
1144 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1145 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
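      # node_drbd now maps each DRBD minor on this node to a tuple of
      # (instance name, whether it should be active); e.g. (hypothetical)
      #   {0: ("instance1.example.com", True), 5: ("ghost-inst", False)}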
1146 112f18a5 Iustin Pop
      result = self._VerifyNode(node_i, file_names, local_checksums,
1147 6d2e83d5 Iustin Pop
                                nresult, feedback_fn, master_files,
1148 cc9e1230 Guido Trotter
                                node_drbd, vg_name)
1149 a8083063 Iustin Pop
      bad = bad or result
1150 a8083063 Iustin Pop
1151 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1152 cc9e1230 Guido Trotter
      if vg_name is None:
1153 cc9e1230 Guido Trotter
        node_volume[node] = {}
1154 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1155 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
1156 26f15862 Iustin Pop
                    (node, utils.SafeEncode(lvdata)))
1157 b63ed789 Iustin Pop
        bad = True
1158 b63ed789 Iustin Pop
        node_volume[node] = {}
1159 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1160 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1161 a8083063 Iustin Pop
        bad = True
1162 a8083063 Iustin Pop
        continue
1163 b63ed789 Iustin Pop
      else:
1164 25361b9a Iustin Pop
        node_volume[node] = lvdata
1165 a8083063 Iustin Pop
1166 a8083063 Iustin Pop
      # node_instance
1167 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1168 25361b9a Iustin Pop
      if not isinstance(idata, list):
1169 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1170 25361b9a Iustin Pop
                    (node,))
1171 a8083063 Iustin Pop
        bad = True
1172 a8083063 Iustin Pop
        continue
1173 a8083063 Iustin Pop
1174 25361b9a Iustin Pop
      node_instance[node] = idata
1175 a8083063 Iustin Pop
1176 9c9c7d30 Guido Trotter
      # node_info
1177 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1178 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
1179 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1180 9c9c7d30 Guido Trotter
        bad = True
1181 9c9c7d30 Guido Trotter
        continue
1182 9c9c7d30 Guido Trotter
1183 9c9c7d30 Guido Trotter
      try:
1184 9c9c7d30 Guido Trotter
        node_info[node] = {
1185 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1186 93e4c50b Guido Trotter
          "pinst": [],
1187 93e4c50b Guido Trotter
          "sinst": [],
1188 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1189 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1190 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1191 36e7da50 Guido Trotter
          # current node as secondary. This is handy to calculate N+1 memory
1192 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1193 36e7da50 Guido Trotter
          # secondary.
1194 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1195 9c9c7d30 Guido Trotter
        }
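        # Illustrative shape (hypothetical names): once the instance loop
        # below has run, node_info["node3"]["sinst-by-pnode"] could be
        #   {"node1": ["inst1", "inst2"], "node2": ["inst5"]}
        # i.e. the instances node3 is secondary for, grouped by primary node.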
1196 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1197 cc9e1230 Guido Trotter
        if vg_name is not None:
1198 9a198532 Iustin Pop
          if (constants.NV_VGLIST not in nresult or
1199 9a198532 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST]):
1200 9a198532 Iustin Pop
            feedback_fn("  - ERROR: node %s didn't return data for the"
1201 9a198532 Iustin Pop
                        " volume group '%s' - it is either missing or broken" %
1202 9a198532 Iustin Pop
                        (node, vg_name))
1203 9a198532 Iustin Pop
            bad = True
1204 9a198532 Iustin Pop
            continue
1205 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1206 9a198532 Iustin Pop
      except (ValueError, KeyError):
1207 9a198532 Iustin Pop
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
1208 9a198532 Iustin Pop
                    " from node %s" % (node,))
1209 9c9c7d30 Guido Trotter
        bad = True
1210 9c9c7d30 Guido Trotter
        continue
1211 9c9c7d30 Guido Trotter
1212 a8083063 Iustin Pop
    node_vol_should = {}
1213 a8083063 Iustin Pop
1214 a8083063 Iustin Pop
    for instance in instancelist:
1215 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1216 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1217 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1218 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1219 c5705f58 Guido Trotter
      bad = bad or result
1220 832261fd Iustin Pop
      inst_nodes_offline = []
1221 a8083063 Iustin Pop
1222 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1223 a8083063 Iustin Pop
1224 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1225 26b6af5e Guido Trotter
1226 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1227 93e4c50b Guido Trotter
      if pnode in node_info:
1228 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1229 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1230 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1231 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1232 93e4c50b Guido Trotter
        bad = True
1233 93e4c50b Guido Trotter
1234 832261fd Iustin Pop
      if pnode in n_offline:
1235 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1236 832261fd Iustin Pop
1237 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1238 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1239 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1240 93e4c50b Guido Trotter
      # supported either.
1241 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1242 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1243 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1244 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1245 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1246 93e4c50b Guido Trotter
                    % instance)
1247 93e4c50b Guido Trotter
1248 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1249 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1250 3924700f Iustin Pop
1251 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1252 93e4c50b Guido Trotter
        if snode in node_info:
1253 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1254 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1255 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1256 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1257 0a66c968 Iustin Pop
        elif snode not in n_offline:
1258 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1259 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1260 832261fd Iustin Pop
          bad = True
1261 832261fd Iustin Pop
        if snode in n_offline:
1262 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1263 832261fd Iustin Pop
1264 832261fd Iustin Pop
      if inst_nodes_offline:
1265 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1266 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1267 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1268 832261fd Iustin Pop
        bad = True
1269 93e4c50b Guido Trotter
1270 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1271 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1272 a8083063 Iustin Pop
                                       feedback_fn)
1273 a8083063 Iustin Pop
    bad = bad or result
1274 a8083063 Iustin Pop
1275 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1276 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1277 a8083063 Iustin Pop
                                         feedback_fn)
1278 a8083063 Iustin Pop
    bad = bad or result
1279 a8083063 Iustin Pop
1280 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1281 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1282 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1283 e54c4c5e Guido Trotter
      bad = bad or result
1284 2b3b6ddd Guido Trotter
1285 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1286 2b3b6ddd Guido Trotter
    if i_non_redundant:
1287 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1288 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1289 2b3b6ddd Guido Trotter
1290 3924700f Iustin Pop
    if i_non_a_balanced:
1291 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1292 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1293 3924700f Iustin Pop
1294 0a66c968 Iustin Pop
    if n_offline:
1295 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1296 0a66c968 Iustin Pop
1297 22f0f71d Iustin Pop
    if n_drained:
1298 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1299 22f0f71d Iustin Pop
1300 34290825 Michael Hanselmann
    return not bad
1301 a8083063 Iustin Pop
1302 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1303 5bbd3f7f Michael Hanselmann
    """Analyze the post-hooks' result
1304 e4376078 Iustin Pop

1305 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
1306 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
1307 d8fff41c Guido Trotter

1308 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1309 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1310 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
1311 e4376078 Iustin Pop
    @param feedback_fn: function used to send feedback back to the caller
1312 e4376078 Iustin Pop
    @param lu_result: previous Exec result
1313 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
1314 e4376078 Iustin Pop
        and hook results
1315 d8fff41c Guido Trotter

1316 d8fff41c Guido Trotter
    """
1317 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
1318 38206f3c Iustin Pop
    # their results
1319 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
1320 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
1321 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
1322 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
1323 d8fff41c Guido Trotter
      if not hooks_results:
1324 d8fff41c Guido Trotter
        feedback_fn("  - ERROR: general communication failure")
1325 d8fff41c Guido Trotter
        lu_result = 1
1326 d8fff41c Guido Trotter
      else:
1327 d8fff41c Guido Trotter
        for node_name in hooks_results:
1328 d8fff41c Guido Trotter
          show_node_header = True
1329 d8fff41c Guido Trotter
          res = hooks_results[node_name]
1330 4c4e4e1e Iustin Pop
          msg = res.fail_msg
1331 3fb4f740 Iustin Pop
          if msg:
1332 0a66c968 Iustin Pop
            if res.offline:
1333 0a66c968 Iustin Pop
              # no need to warn or set fail return value
1334 0a66c968 Iustin Pop
              continue
1335 3fb4f740 Iustin Pop
            feedback_fn("    Communication failure in hooks execution: %s" %
1336 3fb4f740 Iustin Pop
                        msg)
1337 d8fff41c Guido Trotter
            lu_result = 1
1338 d8fff41c Guido Trotter
            continue
1339 3fb4f740 Iustin Pop
          for script, hkr, output in res.payload:
1340 d8fff41c Guido Trotter
            if hkr == constants.HKR_FAIL:
1341 d8fff41c Guido Trotter
              # The node header is only shown once, if there are
1342 d8fff41c Guido Trotter
              # failing hooks on that node
1343 d8fff41c Guido Trotter
              if show_node_header:
1344 d8fff41c Guido Trotter
                feedback_fn("  Node %s:" % node_name)
1345 d8fff41c Guido Trotter
                show_node_header = False
1346 d8fff41c Guido Trotter
              feedback_fn("    ERROR: Script %s failed, output:" % script)
1347 d8fff41c Guido Trotter
              output = indent_re.sub('      ', output)
1348 d8fff41c Guido Trotter
              feedback_fn("%s" % output)
1349 d8fff41c Guido Trotter
              lu_result = 1
1350 d8fff41c Guido Trotter
1351 d8fff41c Guido Trotter
      return lu_result
1352 d8fff41c Guido Trotter
1353 a8083063 Iustin Pop
1354 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1355 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1356 2c95a8d4 Iustin Pop

1357 2c95a8d4 Iustin Pop
  """
1358 2c95a8d4 Iustin Pop
  _OP_REQP = []
1359 d4b9d97f Guido Trotter
  REQ_BGL = False
1360 d4b9d97f Guido Trotter
1361 d4b9d97f Guido Trotter
  def ExpandNames(self):
1362 d4b9d97f Guido Trotter
    self.needed_locks = {
1363 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1364 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1365 d4b9d97f Guido Trotter
    }
1366 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1367 2c95a8d4 Iustin Pop
1368 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1369 2c95a8d4 Iustin Pop
    """Check prerequisites.
1370 2c95a8d4 Iustin Pop

1371 2c95a8d4 Iustin Pop
    This has no prerequisites.
1372 2c95a8d4 Iustin Pop

1373 2c95a8d4 Iustin Pop
    """
1374 2c95a8d4 Iustin Pop
    pass
1375 2c95a8d4 Iustin Pop
1376 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1377 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1378 2c95a8d4 Iustin Pop

1379 29d376ec Iustin Pop
    @rtype: tuple of three items
1380 29d376ec Iustin Pop
    @return: a tuple of (dict of node-to-node_error, list of instances
1381 29d376ec Iustin Pop
        which need activate-disks, dict of instance: (node, volume) for
1382 29d376ec Iustin Pop
        missing volumes)
1383 29d376ec Iustin Pop

1384 2c95a8d4 Iustin Pop
    """
1385 29d376ec Iustin Pop
    result = res_nodes, res_instances, res_missing = {}, [], {}
1386 2c95a8d4 Iustin Pop
1387 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1388 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1389 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1390 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1391 2c95a8d4 Iustin Pop
1392 2c95a8d4 Iustin Pop
    nv_dict = {}
1393 2c95a8d4 Iustin Pop
    for inst in instances:
1394 2c95a8d4 Iustin Pop
      inst_lvs = {}
1395 0d68c45d Iustin Pop
      if (not inst.admin_up or
1396 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1397 2c95a8d4 Iustin Pop
        continue
1398 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1399 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1400 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1401 2c95a8d4 Iustin Pop
        for vol in vol_list:
1402 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1403 2c95a8d4 Iustin Pop
1404 2c95a8d4 Iustin Pop
    if not nv_dict:
1405 2c95a8d4 Iustin Pop
      return result
1406 2c95a8d4 Iustin Pop
1407 b2a6ccd4 Iustin Pop
    node_lvs = self.rpc.call_lv_list(nodes, vg_name)
1408 2c95a8d4 Iustin Pop
1409 2c95a8d4 Iustin Pop
    for node in nodes:
1410 2c95a8d4 Iustin Pop
      # node_volume
1411 29d376ec Iustin Pop
      node_res = node_lvs[node]
1412 29d376ec Iustin Pop
      if node_res.offline:
1413 ea9ddc07 Iustin Pop
        continue
1414 4c4e4e1e Iustin Pop
      msg = node_res.fail_msg
1415 29d376ec Iustin Pop
      if msg:
1416 29d376ec Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
1417 29d376ec Iustin Pop
        res_nodes[node] = msg
1418 2c95a8d4 Iustin Pop
        continue
1419 2c95a8d4 Iustin Pop
1420 29d376ec Iustin Pop
      lvs = node_res.payload
1421 29d376ec Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.items():
1422 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1423 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1424 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1425 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1426 2c95a8d4 Iustin Pop
1427 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1428 b63ed789 Iustin Pop
    # data better
1429 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1430 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1431 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1432 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1433 b63ed789 Iustin Pop
1434 2c95a8d4 Iustin Pop
    return result
1435 2c95a8d4 Iustin Pop
1436 2c95a8d4 Iustin Pop
1437 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1438 07bd8a51 Iustin Pop
  """Rename the cluster.
1439 07bd8a51 Iustin Pop

1440 07bd8a51 Iustin Pop
  """
1441 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1442 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1443 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1444 07bd8a51 Iustin Pop
1445 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1446 07bd8a51 Iustin Pop
    """Build hooks env.
1447 07bd8a51 Iustin Pop

1448 07bd8a51 Iustin Pop
    """
1449 07bd8a51 Iustin Pop
    env = {
1450 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1451 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1452 07bd8a51 Iustin Pop
      }
1453 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1454 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1455 07bd8a51 Iustin Pop
1456 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1457 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1458 07bd8a51 Iustin Pop

1459 07bd8a51 Iustin Pop
    """
1460 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1461 07bd8a51 Iustin Pop
1462 bcf043c9 Iustin Pop
    new_name = hostname.name
1463 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1464 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
1465 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
1466 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1467 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1468 07bd8a51 Iustin Pop
                                 " cluster has changed")
1469 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1470 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1471 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1472 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1473 07bd8a51 Iustin Pop
                                   new_ip)
1474 07bd8a51 Iustin Pop
1475 07bd8a51 Iustin Pop
    self.op.name = new_name
1476 07bd8a51 Iustin Pop
1477 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1478 07bd8a51 Iustin Pop
    """Rename the cluster.
1479 07bd8a51 Iustin Pop

1480 07bd8a51 Iustin Pop
    """
1481 07bd8a51 Iustin Pop
    clustername = self.op.name
1482 07bd8a51 Iustin Pop
    ip = self.ip
1483 07bd8a51 Iustin Pop
1484 07bd8a51 Iustin Pop
    # shutdown the master IP
1485 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
1486 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
1487 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
1488 07bd8a51 Iustin Pop
1489 07bd8a51 Iustin Pop
    try:
1490 55cf7d83 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
1491 55cf7d83 Iustin Pop
      cluster.cluster_name = clustername
1492 55cf7d83 Iustin Pop
      cluster.master_ip = ip
1493 55cf7d83 Iustin Pop
      self.cfg.Update(cluster)
1494 ec85e3d5 Iustin Pop
1495 ec85e3d5 Iustin Pop
      # update the known hosts file
1496 ec85e3d5 Iustin Pop
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
1497 ec85e3d5 Iustin Pop
      node_list = self.cfg.GetNodeList()
1498 ec85e3d5 Iustin Pop
      try:
1499 ec85e3d5 Iustin Pop
        node_list.remove(master)
1500 ec85e3d5 Iustin Pop
      except ValueError:
1501 ec85e3d5 Iustin Pop
        pass
1502 ec85e3d5 Iustin Pop
      result = self.rpc.call_upload_file(node_list,
1503 ec85e3d5 Iustin Pop
                                         constants.SSH_KNOWN_HOSTS_FILE)
1504 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
1505 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
1506 6f7d4e75 Iustin Pop
        if msg:
1507 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
1508 6f7d4e75 Iustin Pop
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
1509 6f7d4e75 Iustin Pop
          self.proc.LogWarning(msg)
1510 ec85e3d5 Iustin Pop
1511 07bd8a51 Iustin Pop
    finally:
1512 3583908a Guido Trotter
      result = self.rpc.call_node_start_master(master, False, False)
1513 4c4e4e1e Iustin Pop
      msg = result.fail_msg
1514 b726aff0 Iustin Pop
      if msg:
1515 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
1516 b726aff0 Iustin Pop
                        " the master, please restart manually: %s", msg)
1517 07bd8a51 Iustin Pop
1518 07bd8a51 Iustin Pop
1519 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1520 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1521 8084f9f6 Manuel Franceschini

1522 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
1523 e4376078 Iustin Pop
  @param disk: the disk to check
1524 5bbd3f7f Michael Hanselmann
  @rtype: boolean
1525 e4376078 Iustin Pop
  @return: boolean indicating whether a LD_LV dev_type was found or not
1526 8084f9f6 Manuel Franceschini

1527 8084f9f6 Manuel Franceschini
  """
1528 8084f9f6 Manuel Franceschini
  if disk.children:
1529 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1530 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1531 8084f9f6 Manuel Franceschini
        return True
1532 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
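# For example (illustrative): a plain LVM disk with no children satisfies the
# final dev_type check directly, while a DRBD disk backed by two logical
# volumes is reported as lvm-based through the recursion over its children.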
1533 8084f9f6 Manuel Franceschini
1534 8084f9f6 Manuel Franceschini
1535 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1536 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1537 8084f9f6 Manuel Franceschini

1538 8084f9f6 Manuel Franceschini
  """
1539 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1540 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1541 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1542 c53279cf Guido Trotter
  REQ_BGL = False
1543 c53279cf Guido Trotter
1544 3994f455 Iustin Pop
  def CheckArguments(self):
1545 4b7735f9 Iustin Pop
    """Check parameters
1546 4b7735f9 Iustin Pop

1547 4b7735f9 Iustin Pop
    """
1548 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
1549 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
1550 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1551 4b7735f9 Iustin Pop
      try:
1552 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1553 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
1554 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1555 4b7735f9 Iustin Pop
                                   str(err))
1556 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
1557 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed")
1558 4b7735f9 Iustin Pop
1559 c53279cf Guido Trotter
  def ExpandNames(self):
1560 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
1561 c53279cf Guido Trotter
    # all nodes to be modified.
1562 c53279cf Guido Trotter
    self.needed_locks = {
1563 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1564 c53279cf Guido Trotter
    }
1565 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1566 8084f9f6 Manuel Franceschini
1567 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1568 8084f9f6 Manuel Franceschini
    """Build hooks env.
1569 8084f9f6 Manuel Franceschini

1570 8084f9f6 Manuel Franceschini
    """
1571 8084f9f6 Manuel Franceschini
    env = {
1572 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1573 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1574 8084f9f6 Manuel Franceschini
      }
1575 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1576 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1577 8084f9f6 Manuel Franceschini
1578 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1579 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1580 8084f9f6 Manuel Franceschini

1581 8084f9f6 Manuel Franceschini
    This checks whether the given parameters don't conflict and
1582 5f83e263 Iustin Pop
    whether the given volume group is valid.
1583 8084f9f6 Manuel Franceschini

1584 8084f9f6 Manuel Franceschini
    """
1585 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
1586 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1587 8084f9f6 Manuel Franceschini
      for inst in instances:
1588 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1589 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1590 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1591 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1592 8084f9f6 Manuel Franceschini
1593 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1594 779c15bb Iustin Pop
1595 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1596 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1597 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
1598 8084f9f6 Manuel Franceschini
      for node in node_list:
1599 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
1600 e480923b Iustin Pop
        if msg:
1601 781de953 Iustin Pop
          # ignoring down node
1602 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
1603 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
1604 781de953 Iustin Pop
          continue
1605 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
1606 781de953 Iustin Pop
                                              self.op.vg_name,
1607 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1608 8084f9f6 Manuel Franceschini
        if vgstatus:
1609 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1610 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1611 8084f9f6 Manuel Franceschini
1612 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
1613 5af3da74 Guido Trotter
    # validate parameter changes
1614 779c15bb Iustin Pop
    if self.op.beparams:
1615 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
1616 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
1617 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
1618 779c15bb Iustin Pop
1619 5af3da74 Guido Trotter
    if self.op.nicparams:
1620 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
1621 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
1622 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
1623 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
1624 5af3da74 Guido Trotter
1625 779c15bb Iustin Pop
    # hypervisor list/parameters
1626 abe609b2 Guido Trotter
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
1627 779c15bb Iustin Pop
    if self.op.hvparams:
1628 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
1629 779c15bb Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1630 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
1631 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
1632 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
1633 779c15bb Iustin Pop
        else:
1634 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
1635 779c15bb Iustin Pop
1636 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1637 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
1638 b119bccb Guido Trotter
      if not self.hv_list:
1639 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
1640 b119bccb Guido Trotter
                                   " least one member")
1641 b119bccb Guido Trotter
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
1642 b119bccb Guido Trotter
      if invalid_hvs:
1643 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
1644 b119bccb Guido Trotter
                                   " entries: %s" % invalid_hvs)
1645 779c15bb Iustin Pop
    else:
1646 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
1647 779c15bb Iustin Pop
1648 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1649 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
1650 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
1651 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1652 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
1653 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
1654 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
1655 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
1656 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1657 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
1658 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
1659 779c15bb Iustin Pop
1660 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1661 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1662 8084f9f6 Manuel Franceschini

1663 8084f9f6 Manuel Franceschini
    """
1664 779c15bb Iustin Pop
    if self.op.vg_name is not None:
1665 b2482333 Guido Trotter
      new_volume = self.op.vg_name
1666 b2482333 Guido Trotter
      if not new_volume:
1667 b2482333 Guido Trotter
        new_volume = None
1668 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
1669 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
1670 779c15bb Iustin Pop
      else:
1671 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
1672 779c15bb Iustin Pop
                    " state, not changing")
1673 779c15bb Iustin Pop
    if self.op.hvparams:
1674 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
1675 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1676 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1677 779c15bb Iustin Pop
    if self.op.beparams:
1678 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
1679 5af3da74 Guido Trotter
    if self.op.nicparams:
1680 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
1681 5af3da74 Guido Trotter
1682 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1683 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1684 75e914fb Iustin Pop
      # we need to update the pool size here, otherwise the save will fail
1685 75e914fb Iustin Pop
      _AdjustCandidatePool(self)
1686 4b7735f9 Iustin Pop
1687 779c15bb Iustin Pop
    self.cfg.Update(self.cluster)
1688 8084f9f6 Manuel Franceschini
1689 8084f9f6 Manuel Franceschini
1690 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
1691 28eddce5 Guido Trotter
  """Distribute additional files which are part of the cluster configuration.
1692 28eddce5 Guido Trotter

1693 28eddce5 Guido Trotter
  ConfigWriter takes care of distributing the config and ssconf files, but
1694 28eddce5 Guido Trotter
  there are more files which should be distributed to all nodes. This function
1695 28eddce5 Guido Trotter
  makes sure those are copied.
1696 28eddce5 Guido Trotter

1697 28eddce5 Guido Trotter
  @param lu: calling logical unit
1698 28eddce5 Guido Trotter
  @param additional_nodes: list of nodes not in the config to distribute to
1699 28eddce5 Guido Trotter

1700 28eddce5 Guido Trotter
  """
1701 28eddce5 Guido Trotter
  # 1. Gather target nodes
1702 28eddce5 Guido Trotter
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
1703 28eddce5 Guido Trotter
  dist_nodes = lu.cfg.GetNodeList()
1704 28eddce5 Guido Trotter
  if additional_nodes is not None:
1705 28eddce5 Guido Trotter
    dist_nodes.extend(additional_nodes)
1706 28eddce5 Guido Trotter
  if myself.name in dist_nodes:
1707 28eddce5 Guido Trotter
    dist_nodes.remove(myself.name)
1708 28eddce5 Guido Trotter
  # 2. Gather files to distribute
1709 28eddce5 Guido Trotter
  dist_files = set([constants.ETC_HOSTS,
1710 28eddce5 Guido Trotter
                    constants.SSH_KNOWN_HOSTS_FILE,
1711 28eddce5 Guido Trotter
                    constants.RAPI_CERT_FILE,
1712 28eddce5 Guido Trotter
                    constants.RAPI_USERS_FILE,
1713 4a34c5cf Guido Trotter
                    constants.HMAC_CLUSTER_KEY,
1714 28eddce5 Guido Trotter
                   ])
1715 e1b8653f Guido Trotter
1716 e1b8653f Guido Trotter
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
1717 e1b8653f Guido Trotter
  for hv_name in enabled_hypervisors:
1718 e1b8653f Guido Trotter
    hv_class = hypervisor.GetHypervisor(hv_name)
1719 e1b8653f Guido Trotter
    dist_files.update(hv_class.GetAncillaryFiles())
1720 e1b8653f Guido Trotter
1721 28eddce5 Guido Trotter
  # 3. Perform the files upload
1722 28eddce5 Guido Trotter
  for fname in dist_files:
1723 28eddce5 Guido Trotter
    if os.path.exists(fname):
1724 28eddce5 Guido Trotter
      result = lu.rpc.call_upload_file(dist_nodes, fname)
1725 28eddce5 Guido Trotter
      for to_node, to_result in result.items():
1726 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
1727 6f7d4e75 Iustin Pop
        if msg:
1728 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
1729 6f7d4e75 Iustin Pop
                 (fname, to_node, msg))
1730 6f7d4e75 Iustin Pop
          lu.proc.LogWarning(msg)
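# Note (added for clarity): within this module the helper above is called as
# _RedistributeAncillaryFiles(self) from LURedistributeConfig.Exec below;
# callers may pass additional_nodes for nodes that are not yet part of the
# configuration, as described in the docstring.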
1731 28eddce5 Guido Trotter
1732 28eddce5 Guido Trotter
1733 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
1734 afee0879 Iustin Pop
  """Force the redistribution of cluster configuration.
1735 afee0879 Iustin Pop

1736 afee0879 Iustin Pop
  This is a very simple LU.
1737 afee0879 Iustin Pop

1738 afee0879 Iustin Pop
  """
1739 afee0879 Iustin Pop
  _OP_REQP = []
1740 afee0879 Iustin Pop
  REQ_BGL = False
1741 afee0879 Iustin Pop
1742 afee0879 Iustin Pop
  def ExpandNames(self):
1743 afee0879 Iustin Pop
    self.needed_locks = {
1744 afee0879 Iustin Pop
      locking.LEVEL_NODE: locking.ALL_SET,
1745 afee0879 Iustin Pop
    }
1746 afee0879 Iustin Pop
    self.share_locks[locking.LEVEL_NODE] = 1
1747 afee0879 Iustin Pop
1748 afee0879 Iustin Pop
  def CheckPrereq(self):
1749 afee0879 Iustin Pop
    """Check prerequisites.
1750 afee0879 Iustin Pop

1751 afee0879 Iustin Pop
    """
1752 afee0879 Iustin Pop
1753 afee0879 Iustin Pop
  def Exec(self, feedback_fn):
1754 afee0879 Iustin Pop
    """Redistribute the configuration.
1755 afee0879 Iustin Pop

1756 afee0879 Iustin Pop
    """
1757 afee0879 Iustin Pop
    self.cfg.Update(self.cfg.GetClusterInfo())
1758 28eddce5 Guido Trotter
    _RedistributeAncillaryFiles(self)
1759 afee0879 Iustin Pop
1760 afee0879 Iustin Pop
1761 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1762 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1763 a8083063 Iustin Pop

1764 a8083063 Iustin Pop
  """
1765 a8083063 Iustin Pop
  if not instance.disks:
1766 a8083063 Iustin Pop
    return True
1767 a8083063 Iustin Pop
1768 a8083063 Iustin Pop
  if not oneshot:
1769 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1770 a8083063 Iustin Pop
1771 a8083063 Iustin Pop
  node = instance.primary_node
1772 a8083063 Iustin Pop
1773 a8083063 Iustin Pop
  for dev in instance.disks:
1774 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
1775 a8083063 Iustin Pop
1776 a8083063 Iustin Pop
  retries = 0
1777 fbafd7a8 Iustin Pop
  degr_retries = 10 # in seconds, as we sleep 1 second each time
1778 a8083063 Iustin Pop
  while True:
1779 a8083063 Iustin Pop
    max_time = 0
1780 a8083063 Iustin Pop
    done = True
1781 a8083063 Iustin Pop
    cumul_degraded = False
1782 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1783 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
1784 3efa9051 Iustin Pop
    if msg:
1785 3efa9051 Iustin Pop
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
1786 a8083063 Iustin Pop
      retries += 1
1787 a8083063 Iustin Pop
      if retries >= 10:
1788 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1789 3ecf6786 Iustin Pop
                                 " aborting." % node)
1790 a8083063 Iustin Pop
      time.sleep(6)
1791 a8083063 Iustin Pop
      continue
1792 3efa9051 Iustin Pop
    rstats = rstats.payload
1793 a8083063 Iustin Pop
    retries = 0
1794 1492cca7 Iustin Pop
    for i, mstat in enumerate(rstats):
1795 a8083063 Iustin Pop
      if mstat is None:
1796 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
1797 86d9d3bb Iustin Pop
                           node, instance.disks[i].iv_name)
1798 a8083063 Iustin Pop
        continue
1799 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1800 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1801 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1802 a8083063 Iustin Pop
      if perc_done is not None:
1803 a8083063 Iustin Pop
        done = False
1804 a8083063 Iustin Pop
        if est_time is not None:
1805 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1806 a8083063 Iustin Pop
          max_time = est_time
1807 a8083063 Iustin Pop
        else:
1808 a8083063 Iustin Pop
          rem_time = "no time estimate"
1809 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1810 b9bddb6b Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
1811 fbafd7a8 Iustin Pop
1812 fbafd7a8 Iustin Pop
    # if we're done but degraded, let's do a few small retries, to
1813 fbafd7a8 Iustin Pop
    # make sure we see a stable and not transient situation; therefore
1814 fbafd7a8 Iustin Pop
    # we force restart of the loop
1815 fbafd7a8 Iustin Pop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
1816 fbafd7a8 Iustin Pop
      logging.info("Degraded disks found, %d retries left", degr_retries)
1817 fbafd7a8 Iustin Pop
      degr_retries -= 1
1818 fbafd7a8 Iustin Pop
      time.sleep(1)
1819 fbafd7a8 Iustin Pop
      continue
1820 fbafd7a8 Iustin Pop
1821 a8083063 Iustin Pop
    if done or oneshot:
1822 a8083063 Iustin Pop
      break
1823 a8083063 Iustin Pop
1824 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1825 a8083063 Iustin Pop
1826 a8083063 Iustin Pop
  if done:
1827 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1828 a8083063 Iustin Pop
  return not cumul_degraded
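# Timing summary of the loop above (added for clarity): status polls are
# spaced by min(60, max_time) seconds, RPC failures are retried every 6
# seconds up to 10 times, and a "done but degraded" state is re-checked once
# per second for up to 10 extra iterations before the result is accepted.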
1829 a8083063 Iustin Pop
1830 a8083063 Iustin Pop
1831 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1832 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1833 a8083063 Iustin Pop

1834 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1835 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1836 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1837 0834c866 Iustin Pop

1838 a8083063 Iustin Pop
  """
1839 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1840 0834c866 Iustin Pop
  if ldisk:
1841 0834c866 Iustin Pop
    idx = 6
1842 0834c866 Iustin Pop
  else:
1843 0834c866 Iustin Pop
    idx = 5
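  # idx selects the relevant field of the blockdev_find payload checked below:
  # position 5 is the overall is_degraded flag and position 6 the local-disk
  # (ldisk) status, matching the docstring above.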
1844 a8083063 Iustin Pop
1845 a8083063 Iustin Pop
  result = True
1846 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1847 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1848 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
1849 23829f6f Iustin Pop
    if msg:
1850 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
1851 23829f6f Iustin Pop
      result = False
1852 23829f6f Iustin Pop
    elif not rstats.payload:
1853 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
1854 a8083063 Iustin Pop
      result = False
1855 a8083063 Iustin Pop
    else:
1856 23829f6f Iustin Pop
      result = result and (not rstats.payload[idx])
1857 a8083063 Iustin Pop
  if dev.children:
1858 a8083063 Iustin Pop
    for child in dev.children:
1859 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1860 a8083063 Iustin Pop
1861 a8083063 Iustin Pop
  return result
1862 a8083063 Iustin Pop
1863 a8083063 Iustin Pop
1864 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1865 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1866 a8083063 Iustin Pop

1867 a8083063 Iustin Pop
  """
1868 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1869 6bf01bbb Guido Trotter
  REQ_BGL = False
1870 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
1871 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")
1872 a8083063 Iustin Pop
1873 6bf01bbb Guido Trotter
  def ExpandNames(self):
1874 1f9430d6 Iustin Pop
    if self.op.names:
1875 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1876 1f9430d6 Iustin Pop
1877 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1878 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1879 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1880 1f9430d6 Iustin Pop
1881 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
1882 a6ab004b Iustin Pop
    # Temporary removal of locks, should be reverted later
1883 a6ab004b Iustin Pop
    # TODO: reintroduce locks when they are lighter-weight
1884 6bf01bbb Guido Trotter
    self.needed_locks = {}
1885 a6ab004b Iustin Pop
    #self.share_locks[locking.LEVEL_NODE] = 1
1886 a6ab004b Iustin Pop
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1887 6bf01bbb Guido Trotter
1888 6bf01bbb Guido Trotter
  def CheckPrereq(self):
1889 6bf01bbb Guido Trotter
    """Check prerequisites.
1890 6bf01bbb Guido Trotter

1891 6bf01bbb Guido Trotter
    """
1892 6bf01bbb Guido Trotter
1893 1f9430d6 Iustin Pop
  @staticmethod
1894 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1895 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1896 1f9430d6 Iustin Pop

1897 e4376078 Iustin Pop
    @param node_list: a list with the names of all nodes
1898 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
1899 1f9430d6 Iustin Pop

1900 e4376078 Iustin Pop
    @rtype: dict
1901 5fcc718f Iustin Pop
    @return: a dictionary with osnames as keys and as value another map, with
1902 255dcebd Iustin Pop
        nodes as keys and tuples of (path, status, diagnose) as values, eg::
1903 e4376078 Iustin Pop

1904 255dcebd Iustin Pop
          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
1905 255dcebd Iustin Pop
                                     (/srv/..., False, "invalid api")],
1906 255dcebd Iustin Pop
                           "node2": [(/srv/..., True, "")]}
1907 e4376078 Iustin Pop
          }
1908 1f9430d6 Iustin Pop

1909 1f9430d6 Iustin Pop
    """
1910 1f9430d6 Iustin Pop
    all_os = {}
1911 a6ab004b Iustin Pop
    # we build here the list of nodes that didn't fail the RPC (at RPC
1912 a6ab004b Iustin Pop
    # level), so that nodes with a non-responding node daemon don't
1913 a6ab004b Iustin Pop
    # make all OSes invalid
1914 a6ab004b Iustin Pop
    good_nodes = [node_name for node_name in rlist
1915 4c4e4e1e Iustin Pop
                  if not rlist[node_name].fail_msg]
1916 83d92ad8 Iustin Pop
    for node_name, nr in rlist.items():
1917 4c4e4e1e Iustin Pop
      if nr.fail_msg or not nr.payload:
1918 1f9430d6 Iustin Pop
        continue
1919 255dcebd Iustin Pop
      for name, path, status, diagnose in nr.payload:
1920 255dcebd Iustin Pop
        if name not in all_os:
1921 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1922 1f9430d6 Iustin Pop
          # for each node in node_list
1923 255dcebd Iustin Pop
          all_os[name] = {}
1924 a6ab004b Iustin Pop
          for nname in good_nodes:
1925 255dcebd Iustin Pop
            all_os[name][nname] = []
1926 255dcebd Iustin Pop
        all_os[name][node_name].append((path, status, diagnose))
1927 1f9430d6 Iustin Pop
    return all_os
1928 a8083063 Iustin Pop
1929 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1930 a8083063 Iustin Pop
    """Compute the list of OSes.
1931 a8083063 Iustin Pop

1932 a8083063 Iustin Pop
    """
1933 a6ab004b Iustin Pop
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
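    # offline nodes are skipped, as they could not answer the OS query anyway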
1934 94a02bb5 Iustin Pop
    node_data = self.rpc.call_os_diagnose(valid_nodes)
1935 94a02bb5 Iustin Pop
    pol = self._DiagnoseByOS(valid_nodes, node_data)
1936 1f9430d6 Iustin Pop
    output = []
1937 83d92ad8 Iustin Pop
    for os_name, os_data in pol.items():
1938 1f9430d6 Iustin Pop
      row = []
1939 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1940 1f9430d6 Iustin Pop
        if field == "name":
1941 1f9430d6 Iustin Pop
          val = os_name
1942 1f9430d6 Iustin Pop
        elif field == "valid":
1943 255dcebd Iustin Pop
          val = utils.all([osl and osl[0][1] for osl in os_data.values()])
1944 1f9430d6 Iustin Pop
        elif field == "node_status":
1945 255dcebd Iustin Pop
          # this is just a copy of the dict
1946 1f9430d6 Iustin Pop
          val = {}
1947 255dcebd Iustin Pop
          for node_name, nos_list in os_data.items():
1948 255dcebd Iustin Pop
            val[node_name] = nos_list
1949 1f9430d6 Iustin Pop
        else:
1950 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1951 1f9430d6 Iustin Pop
        row.append(val)
1952 1f9430d6 Iustin Pop
      output.append(row)
1953 1f9430d6 Iustin Pop
1954 1f9430d6 Iustin Pop
    return output
1955 a8083063 Iustin Pop
1956 a8083063 Iustin Pop
1957 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1958 a8083063 Iustin Pop
  """Logical unit for removing a node.
1959 a8083063 Iustin Pop

1960 a8083063 Iustin Pop
  """
1961 a8083063 Iustin Pop
  HPATH = "node-remove"
1962 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1963 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1964 a8083063 Iustin Pop
1965 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1966 a8083063 Iustin Pop
    """Build hooks env.
1967 a8083063 Iustin Pop

1968 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1969 d08869ee Guido Trotter
    node would then be impossible to remove.
1970 a8083063 Iustin Pop

1971 a8083063 Iustin Pop
    """
1972 396e1b78 Michael Hanselmann
    env = {
1973 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1974 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1975 396e1b78 Michael Hanselmann
      }
1976 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1977 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1978 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1979 a8083063 Iustin Pop
1980 a8083063 Iustin Pop
  def CheckPrereq(self):
1981 a8083063 Iustin Pop
    """Check prerequisites.
1982 a8083063 Iustin Pop

1983 a8083063 Iustin Pop
    This checks:
1984 a8083063 Iustin Pop
     - the node exists in the configuration
1985 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1986 a8083063 Iustin Pop
     - it's not the master
1987 a8083063 Iustin Pop

1988 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
1989 a8083063 Iustin Pop

1990 a8083063 Iustin Pop
    """
1991 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1992 a8083063 Iustin Pop
    if node is None:
1993 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1994 a8083063 Iustin Pop
1995 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1996 a8083063 Iustin Pop
1997 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
1998 a8083063 Iustin Pop
    if node.name == masternode:
1999 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
2000 3ecf6786 Iustin Pop
                                 " you need to failover first.")
2001 a8083063 Iustin Pop
2002 a8083063 Iustin Pop
    for instance_name in instance_list:
2003 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
2004 6b12959c Iustin Pop
      if node.name in instance.all_nodes:
2005 6b12959c Iustin Pop
        raise errors.OpPrereqError("Instance %s is still running on the node,"
2006 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
2007 a8083063 Iustin Pop
    self.op.node_name = node.name
2008 a8083063 Iustin Pop
    self.node = node
2009 a8083063 Iustin Pop
2010 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2011 a8083063 Iustin Pop
    """Removes the node from the cluster.
2012 a8083063 Iustin Pop

2013 a8083063 Iustin Pop
    """
2014 a8083063 Iustin Pop
    node = self.node
2015 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
2016 9a4f63d1 Iustin Pop
                 node.name)
2017 a8083063 Iustin Pop
2018 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
2019 a8083063 Iustin Pop
2020 0623d351 Iustin Pop
    result = self.rpc.call_node_leave_cluster(node.name)
2021 4c4e4e1e Iustin Pop
    msg = result.fail_msg
2022 0623d351 Iustin Pop
    if msg:
2023 0623d351 Iustin Pop
      self.LogWarning("Errors encountered on the remote node while leaving"
2024 0623d351 Iustin Pop
                      " the cluster: %s", msg)
2025 c8a0948f Michael Hanselmann
2026 eb1742d5 Guido Trotter
    # Promote nodes to master candidate as needed
2027 ec0292f1 Iustin Pop
    _AdjustCandidatePool(self)
2028 eb1742d5 Guido Trotter
2029 a8083063 Iustin Pop
2030 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
2031 a8083063 Iustin Pop
  """Logical unit for querying nodes.
2032 a8083063 Iustin Pop

2033 a8083063 Iustin Pop
  """
2034 bc8e4a1a Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
2035 35705d8f Guido Trotter
  REQ_BGL = False
2036 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
2037 31bf511f Iustin Pop
    "dtotal", "dfree",
2038 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
2039 31bf511f Iustin Pop
    "bootid",
2040 0105bad3 Iustin Pop
    "ctotal", "cnodes", "csockets",
2041 31bf511f Iustin Pop
    )
2042 31bf511f Iustin Pop
2043 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(
2044 31bf511f Iustin Pop
    "name", "pinst_cnt", "sinst_cnt",
2045 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
2046 31bf511f Iustin Pop
    "pip", "sip", "tags",
2047 31bf511f Iustin Pop
    "serial_no",
2048 0e67cdbe Iustin Pop
    "master_candidate",
2049 0e67cdbe Iustin Pop
    "master",
2050 9ddb5e45 Iustin Pop
    "offline",
2051 0b2454b9 Iustin Pop
    "drained",
2052 c120ff34 Iustin Pop
    "role",
2053 31bf511f Iustin Pop
    )
2054 a8083063 Iustin Pop
2055 35705d8f Guido Trotter
  def ExpandNames(self):
2056 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2057 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2058 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2059 a8083063 Iustin Pop
2060 35705d8f Guido Trotter
    self.needed_locks = {}
2061 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2062 c8d8b4c8 Iustin Pop
2063 c8d8b4c8 Iustin Pop
    if self.op.names:
2064 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
2065 35705d8f Guido Trotter
    else:
2066 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
2067 c8d8b4c8 Iustin Pop
2068 bc8e4a1a Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2069 bc8e4a1a Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
2070 c8d8b4c8 Iustin Pop
    if self.do_locking:
2071 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
2072 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
2073 c8d8b4c8 Iustin Pop
2074 35705d8f Guido Trotter
2075 35705d8f Guido Trotter
  def CheckPrereq(self):
2076 35705d8f Guido Trotter
    """Check prerequisites.
2077 35705d8f Guido Trotter

2078 35705d8f Guido Trotter
    """
2079 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in _GetWantedNodes when it is
2080 c8d8b4c8 Iustin Pop
    # non-empty; an empty list needs no validation
2081 c8d8b4c8 Iustin Pop
    pass
2082 a8083063 Iustin Pop
2083 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2084 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2085 a8083063 Iustin Pop

2086 a8083063 Iustin Pop
    """
2087 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
2088 c8d8b4c8 Iustin Pop
    if self.do_locking:
2089 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
2090 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2091 3fa93523 Guido Trotter
      nodenames = self.wanted
2092 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
2093 3fa93523 Guido Trotter
      if missing:
2094 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2095 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
2096 c8d8b4c8 Iustin Pop
    else:
2097 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
2098 c1f1cbb2 Iustin Pop
2099 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
2100 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
2101 a8083063 Iustin Pop
2102 a8083063 Iustin Pop
    # begin data gathering
2103 a8083063 Iustin Pop
2104 bc8e4a1a Iustin Pop
    if self.do_node_query:
2105 a8083063 Iustin Pop
      live_data = {}
2106 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2107 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
2108 a8083063 Iustin Pop
      for name in nodenames:
2109 781de953 Iustin Pop
        nodeinfo = node_data[name]
2110 4c4e4e1e Iustin Pop
        if not nodeinfo.fail_msg and nodeinfo.payload:
2111 070e998b Iustin Pop
          nodeinfo = nodeinfo.payload
2112 d599d686 Iustin Pop
          fn = utils.TryConvert
2113 a8083063 Iustin Pop
          live_data[name] = {
2114 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2115 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2116 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2117 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2118 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2119 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2120 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
2121 0105bad3 Iustin Pop
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2122 0105bad3 Iustin Pop
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2123 a8083063 Iustin Pop
            }
2124 a8083063 Iustin Pop
        else:
2125 a8083063 Iustin Pop
          live_data[name] = {}
2126 a8083063 Iustin Pop
    else:
2127 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
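      # note: dict.fromkeys shares a single empty dict among all keys; this
      # is safe here because the per-node values are only read, never changed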
2128 a8083063 Iustin Pop
2129 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
2130 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
2131 a8083063 Iustin Pop
2132 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2133 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
2134 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
2135 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
2136 a8083063 Iustin Pop
2137 ec223efb Iustin Pop
      for instance_name in instancelist:
2138 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
2139 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
2140 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
2141 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
2142 ec223efb Iustin Pop
          if secnode in node_to_secondary:
2143 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
2144 a8083063 Iustin Pop
2145 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
2146 0e67cdbe Iustin Pop
2147 a8083063 Iustin Pop
    # end data gathering
2148 a8083063 Iustin Pop
2149 a8083063 Iustin Pop
    output = []
2150 a8083063 Iustin Pop
    for node in nodelist:
2151 a8083063 Iustin Pop
      node_output = []
2152 a8083063 Iustin Pop
      for field in self.op.output_fields:
2153 a8083063 Iustin Pop
        if field == "name":
2154 a8083063 Iustin Pop
          val = node.name
2155 ec223efb Iustin Pop
        elif field == "pinst_list":
2156 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
2157 ec223efb Iustin Pop
        elif field == "sinst_list":
2158 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
2159 ec223efb Iustin Pop
        elif field == "pinst_cnt":
2160 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
2161 ec223efb Iustin Pop
        elif field == "sinst_cnt":
2162 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
2163 a8083063 Iustin Pop
        elif field == "pip":
2164 a8083063 Iustin Pop
          val = node.primary_ip
2165 a8083063 Iustin Pop
        elif field == "sip":
2166 a8083063 Iustin Pop
          val = node.secondary_ip
2167 130a6a6f Iustin Pop
        elif field == "tags":
2168 130a6a6f Iustin Pop
          val = list(node.GetTags())
2169 38d7239a Iustin Pop
        elif field == "serial_no":
2170 38d7239a Iustin Pop
          val = node.serial_no
2171 0e67cdbe Iustin Pop
        elif field == "master_candidate":
2172 0e67cdbe Iustin Pop
          val = node.master_candidate
2173 0e67cdbe Iustin Pop
        elif field == "master":
2174 0e67cdbe Iustin Pop
          val = node.name == master_node
2175 9ddb5e45 Iustin Pop
        elif field == "offline":
2176 9ddb5e45 Iustin Pop
          val = node.offline
2177 0b2454b9 Iustin Pop
        elif field == "drained":
2178 0b2454b9 Iustin Pop
          val = node.drained
2179 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
2180 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
2181 c120ff34 Iustin Pop
        elif field == "role":
2182 c120ff34 Iustin Pop
          if node.name == master_node:
2183 c120ff34 Iustin Pop
            val = "M"
2184 c120ff34 Iustin Pop
          elif node.master_candidate:
2185 c120ff34 Iustin Pop
            val = "C"
2186 c120ff34 Iustin Pop
          elif node.drained:
2187 c120ff34 Iustin Pop
            val = "D"
2188 c120ff34 Iustin Pop
          elif node.offline:
2189 c120ff34 Iustin Pop
            val = "O"
2190 c120ff34 Iustin Pop
          else:
2191 c120ff34 Iustin Pop
            val = "R"
2192 a8083063 Iustin Pop
        else:
2193 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2194 a8083063 Iustin Pop
        node_output.append(val)
2195 a8083063 Iustin Pop
      output.append(node_output)
2196 a8083063 Iustin Pop
2197 a8083063 Iustin Pop
    return output
2198 a8083063 Iustin Pop
2199 a8083063 Iustin Pop
2200 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
2201 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
2202 dcb93971 Michael Hanselmann

2203 dcb93971 Michael Hanselmann
  """
2204 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
2205 21a15682 Guido Trotter
  REQ_BGL = False
2206 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
2207 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
2208 21a15682 Guido Trotter
2209 21a15682 Guido Trotter
  def ExpandNames(self):
2210 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2211 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2212 21a15682 Guido Trotter
                       selected=self.op.output_fields)
2213 21a15682 Guido Trotter
2214 21a15682 Guido Trotter
    self.needed_locks = {}
2215 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2216 21a15682 Guido Trotter
    if not self.op.nodes:
2217 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2218 21a15682 Guido Trotter
    else:
2219 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
2220 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
2221 dcb93971 Michael Hanselmann
2222 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
2223 dcb93971 Michael Hanselmann
    """Check prerequisites.
2224 dcb93971 Michael Hanselmann

2225 dcb93971 Michael Hanselmann
    This computes the node list from the locks acquired in ExpandNames.
2226 dcb93971 Michael Hanselmann

2227 dcb93971 Michael Hanselmann
    """
2228 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2229 dcb93971 Michael Hanselmann
2230 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
2231 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
2232 dcb93971 Michael Hanselmann

2233 dcb93971 Michael Hanselmann
    """
2234 a7ba5e53 Iustin Pop
    nodenames = self.nodes
2235 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
2236 dcb93971 Michael Hanselmann
2237 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
2238 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
2239 dcb93971 Michael Hanselmann
2240 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
2241 dcb93971 Michael Hanselmann
2242 dcb93971 Michael Hanselmann
    output = []
2243 dcb93971 Michael Hanselmann
    for node in nodenames:
2244 10bfe6cb Iustin Pop
      nresult = volumes[node]
2245 10bfe6cb Iustin Pop
      if nresult.offline:
2246 10bfe6cb Iustin Pop
        continue
2247 4c4e4e1e Iustin Pop
      msg = nresult.fail_msg
2248 10bfe6cb Iustin Pop
      if msg:
2249 10bfe6cb Iustin Pop
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
2250 37d19eb2 Michael Hanselmann
        continue
2251 37d19eb2 Michael Hanselmann
2252 10bfe6cb Iustin Pop
      node_vols = nresult.payload[:]
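      # work on a copy so that the in-place sort below does not modify the
      # RPC payload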
2253 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
2254 dcb93971 Michael Hanselmann
2255 dcb93971 Michael Hanselmann
      for vol in node_vols:
2256 dcb93971 Michael Hanselmann
        node_output = []
2257 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
2258 dcb93971 Michael Hanselmann
          if field == "node":
2259 dcb93971 Michael Hanselmann
            val = node
2260 dcb93971 Michael Hanselmann
          elif field == "phys":
2261 dcb93971 Michael Hanselmann
            val = vol['dev']
2262 dcb93971 Michael Hanselmann
          elif field == "vg":
2263 dcb93971 Michael Hanselmann
            val = vol['vg']
2264 dcb93971 Michael Hanselmann
          elif field == "name":
2265 dcb93971 Michael Hanselmann
            val = vol['name']
2266 dcb93971 Michael Hanselmann
          elif field == "size":
2267 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
2268 dcb93971 Michael Hanselmann
          elif field == "instance":
2269 dcb93971 Michael Hanselmann
            for inst in ilist:
2270 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
2271 dcb93971 Michael Hanselmann
                continue
2272 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
2273 dcb93971 Michael Hanselmann
                val = inst.name
2274 dcb93971 Michael Hanselmann
                break
2275 dcb93971 Michael Hanselmann
            else:
2276 dcb93971 Michael Hanselmann
              val = '-'
2277 dcb93971 Michael Hanselmann
          else:
2278 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
2279 dcb93971 Michael Hanselmann
          node_output.append(str(val))
2280 dcb93971 Michael Hanselmann
2281 dcb93971 Michael Hanselmann
        output.append(node_output)
2282 dcb93971 Michael Hanselmann
2283 dcb93971 Michael Hanselmann
    return output
2284 dcb93971 Michael Hanselmann
2285 dcb93971 Michael Hanselmann
2286 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
2287 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
2288 a8083063 Iustin Pop

2289 a8083063 Iustin Pop
  """
2290 a8083063 Iustin Pop
  HPATH = "node-add"
2291 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2292 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2293 a8083063 Iustin Pop
2294 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2295 a8083063 Iustin Pop
    """Build hooks env.
2296 a8083063 Iustin Pop

2297 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
2298 a8083063 Iustin Pop

2299 a8083063 Iustin Pop
    """
2300 a8083063 Iustin Pop
    env = {
2301 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2302 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
2303 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
2304 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
2305 a8083063 Iustin Pop
      }
2306 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
2307 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
2308 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
2309 a8083063 Iustin Pop
2310 a8083063 Iustin Pop
  def CheckPrereq(self):
2311 a8083063 Iustin Pop
    """Check prerequisites.
2312 a8083063 Iustin Pop

2313 a8083063 Iustin Pop
    This checks:
2314 a8083063 Iustin Pop
     - the new node is not already in the config
2315 a8083063 Iustin Pop
     - it is resolvable
2316 a8083063 Iustin Pop
     - its parameters (single/dual homed) match the cluster
2317 a8083063 Iustin Pop

2318 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2319 a8083063 Iustin Pop

2320 a8083063 Iustin Pop
    """
2321 a8083063 Iustin Pop
    node_name = self.op.node_name
2322 a8083063 Iustin Pop
    cfg = self.cfg
2323 a8083063 Iustin Pop
2324 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
2325 a8083063 Iustin Pop
2326 bcf043c9 Iustin Pop
    node = dns_data.name
2327 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
2328 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
2329 a8083063 Iustin Pop
    if secondary_ip is None:
2330 a8083063 Iustin Pop
      secondary_ip = primary_ip
2331 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
2332 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
2333 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
2334 e7c6e02b Michael Hanselmann
2335 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
2336 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
2337 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
2338 e7c6e02b Michael Hanselmann
                                 node)
2339 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
2340 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2341 a8083063 Iustin Pop
2342 a8083063 Iustin Pop
    for existing_node_name in node_list:
2343 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
2344 e7c6e02b Michael Hanselmann
2345 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
2346 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
2347 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
2348 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
2349 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
2350 e7c6e02b Michael Hanselmann
        continue
2351 e7c6e02b Michael Hanselmann
2352 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
2353 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
2354 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
2355 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
2356 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
2357 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
2358 a8083063 Iustin Pop
2359 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
2360 a8083063 Iustin Pop
    # same as for the master
2361 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2362 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
2363 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
2364 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
2365 a8083063 Iustin Pop
      if master_singlehomed:
2366 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
2367 3ecf6786 Iustin Pop
                                   " new node has one")
2368 a8083063 Iustin Pop
      else:
2369 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
2370 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
2371 a8083063 Iustin Pop
2372 5bbd3f7f Michael Hanselmann
    # checks reachability
2373 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2374 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
2375 a8083063 Iustin Pop
2376 a8083063 Iustin Pop
    if not newbie_singlehomed:
2377 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
2378 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2379 b15d625f Iustin Pop
                           source=myself.secondary_ip):
2380 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2381 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
2382 a8083063 Iustin Pop
2383 0fff97e9 Guido Trotter
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2384 a8ae3eb5 Iustin Pop
    if self.op.readd:
2385 a8ae3eb5 Iustin Pop
      exceptions = [node]
2386 a8ae3eb5 Iustin Pop
    else:
2387 a8ae3eb5 Iustin Pop
      exceptions = []
2388 a8ae3eb5 Iustin Pop
    mc_now, mc_max = self.cfg.GetMasterCandidateStats(exceptions)
2389 a8ae3eb5 Iustin Pop
    # the new node will increase mc_max by one, so:
2390 a8ae3eb5 Iustin Pop
    mc_max = min(mc_max + 1, cp_size)
2391 a8ae3eb5 Iustin Pop
    self.master_candidate = mc_now < mc_max
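    # i.e. the new node becomes a master candidate only if the current
    # candidate count is still below the new ceiling, which grows by one
    # for the added node but never exceeds candidate_pool_size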
2392 0fff97e9 Guido Trotter
2393 a8ae3eb5 Iustin Pop
    if self.op.readd:
2394 a8ae3eb5 Iustin Pop
      self.new_node = self.cfg.GetNodeInfo(node)
2395 a8ae3eb5 Iustin Pop
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
2396 a8ae3eb5 Iustin Pop
    else:
2397 a8ae3eb5 Iustin Pop
      self.new_node = objects.Node(name=node,
2398 a8ae3eb5 Iustin Pop
                                   primary_ip=primary_ip,
2399 a8ae3eb5 Iustin Pop
                                   secondary_ip=secondary_ip,
2400 a8ae3eb5 Iustin Pop
                                   master_candidate=self.master_candidate,
2401 a8ae3eb5 Iustin Pop
                                   offline=False, drained=False)
2402 a8083063 Iustin Pop
2403 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2404 a8083063 Iustin Pop
    """Adds the new node to the cluster.
2405 a8083063 Iustin Pop

2406 a8083063 Iustin Pop
    """
2407 a8083063 Iustin Pop
    new_node = self.new_node
2408 a8083063 Iustin Pop
    node = new_node.name
2409 a8083063 Iustin Pop
2410 a8ae3eb5 Iustin Pop
    # for re-adds, reset the offline/drained/master-candidate flags;
2411 a8ae3eb5 Iustin Pop
    # we need to reset here, otherwise offline would prevent RPC calls
2412 a8ae3eb5 Iustin Pop
    # later in the procedure; this also means that if the re-add
2413 a8ae3eb5 Iustin Pop
    # fails, we are left with a non-offlined, broken node
2414 a8ae3eb5 Iustin Pop
    if self.op.readd:
2415 a8ae3eb5 Iustin Pop
      new_node.drained = new_node.offline = False
2416 a8ae3eb5 Iustin Pop
      self.LogInfo("Readding a node, the offline/drained flags were reset")
2417 a8ae3eb5 Iustin Pop
      # if we demote the node, we do cleanup later in the procedure
2418 a8ae3eb5 Iustin Pop
      new_node.master_candidate = self.master_candidate
2419 a8ae3eb5 Iustin Pop
2420 a8ae3eb5 Iustin Pop
    # notify the user about any possible mc promotion
2421 a8ae3eb5 Iustin Pop
    if new_node.master_candidate:
2422 a8ae3eb5 Iustin Pop
      self.LogInfo("Node will be a master candidate")
2423 a8ae3eb5 Iustin Pop
2424 a8083063 Iustin Pop
    # check connectivity
2425 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
2426 4c4e4e1e Iustin Pop
    result.Raise("Can't get version information from node %s" % node)
2427 90b54c26 Iustin Pop
    if constants.PROTOCOL_VERSION == result.payload:
2428 90b54c26 Iustin Pop
      logging.info("Communication to node %s fine, sw version %s match",
2429 90b54c26 Iustin Pop
                   node, result.payload)
2430 a8083063 Iustin Pop
    else:
2431 90b54c26 Iustin Pop
      raise errors.OpExecError("Version mismatch master version %s,"
2432 90b54c26 Iustin Pop
                               " node version %s" %
2433 90b54c26 Iustin Pop
                               (constants.PROTOCOL_VERSION, result.payload))
2434 a8083063 Iustin Pop
2435 a8083063 Iustin Pop
    # setup ssh on node
2436 9a4f63d1 Iustin Pop
    logging.info("Copy ssh key to node %s", node)
2437 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2438 a8083063 Iustin Pop
    keyarray = []
2439 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2440 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2441 70d9e3d8 Iustin Pop
                priv_key, pub_key]
2442 a8083063 Iustin Pop
2443 a8083063 Iustin Pop
    for i in keyfiles:
2444 a8083063 Iustin Pop
      f = open(i, 'r')
2445 a8083063 Iustin Pop
      try:
2446 a8083063 Iustin Pop
        keyarray.append(f.read())
2447 a8083063 Iustin Pop
      finally:
2448 a8083063 Iustin Pop
        f.close()
2449 a8083063 Iustin Pop
2450 72737a7f Iustin Pop
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2451 72737a7f Iustin Pop
                                    keyarray[2],
2452 72737a7f Iustin Pop
                                    keyarray[3], keyarray[4], keyarray[5])
2453 4c4e4e1e Iustin Pop
    result.Raise("Cannot transfer ssh keys to the new node")
2454 a8083063 Iustin Pop
2455 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
2456 b86a6bcd Guido Trotter
    if self.cfg.GetClusterInfo().modify_etc_hosts:
2457 b86a6bcd Guido Trotter
      utils.AddHostToEtcHosts(new_node.name)
2458 c8a0948f Michael Hanselmann
2459 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
2460 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
2461 781de953 Iustin Pop
                                                 new_node.secondary_ip)
2462 4c4e4e1e Iustin Pop
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
2463 4c4e4e1e Iustin Pop
                   prereq=True)
2464 c2fc8250 Iustin Pop
      if not result.payload:
2465 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2466 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
2467 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
2468 a8083063 Iustin Pop
2469 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
2470 5c0527ed Guido Trotter
    node_verify_param = {
2471 5c0527ed Guido Trotter
      'nodelist': [node],
2472 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
2473 5c0527ed Guido Trotter
    }
2474 5c0527ed Guido Trotter
2475 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2476 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
2477 5c0527ed Guido Trotter
    for verifier in node_verify_list:
2478 4c4e4e1e Iustin Pop
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
2479 6f68a739 Iustin Pop
      nl_payload = result[verifier].payload['nodelist']
2480 6f68a739 Iustin Pop
      if nl_payload:
2481 6f68a739 Iustin Pop
        for failed in nl_payload:
2482 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
2483 6f68a739 Iustin Pop
                      (verifier, nl_payload[failed]))
2484 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
2485 ff98055b Iustin Pop
2486 d8470559 Michael Hanselmann
    if self.op.readd:
2487 28eddce5 Guido Trotter
      _RedistributeAncillaryFiles(self)
2488 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
2489 a8ae3eb5 Iustin Pop
      # make sure we redistribute the config
2490 a8ae3eb5 Iustin Pop
      self.cfg.Update(new_node)
2491 a8ae3eb5 Iustin Pop
      # and make sure the new node will not have old files around
2492 a8ae3eb5 Iustin Pop
      if not new_node.master_candidate:
2493 a8ae3eb5 Iustin Pop
        result = self.rpc.call_node_demote_from_mc(new_node.name)
2494 a8ae3eb5 Iustin Pop
        msg = result.fail_msg
2495 a8ae3eb5 Iustin Pop
        if msg:
2496 a8ae3eb5 Iustin Pop
          self.LogWarning("Node failed to demote itself from master"
2497 a8ae3eb5 Iustin Pop
                          " candidate status: %s" % msg)
2498 d8470559 Michael Hanselmann
    else:
2499 035566e3 Iustin Pop
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
2500 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
2501 a8083063 Iustin Pop
2502 a8083063 Iustin Pop
2503 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
2504 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
2505 b31c8676 Iustin Pop

2506 b31c8676 Iustin Pop
  """
2507 b31c8676 Iustin Pop
  HPATH = "node-modify"
2508 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2509 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
2510 b31c8676 Iustin Pop
  REQ_BGL = False
2511 b31c8676 Iustin Pop
2512 b31c8676 Iustin Pop
  def CheckArguments(self):
2513 b31c8676 Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2514 b31c8676 Iustin Pop
    if node_name is None:
2515 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2516 b31c8676 Iustin Pop
    self.op.node_name = node_name
2517 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'master_candidate')
2518 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'offline')
2519 c9d443ea Iustin Pop
    _CheckBooleanOpField(self.op, 'drained')
2520 c9d443ea Iustin Pop
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
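    # each entry is True, False or None (not given); the checks below
    # require at least one to be given and at most one to be True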
2521 c9d443ea Iustin Pop
    if all_mods.count(None) == 3:
2522 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification")
2523 c9d443ea Iustin Pop
    if all_mods.count(True) > 1:
2524 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Can't set the node into more than one"
2525 c9d443ea Iustin Pop
                                 " state at the same time")
2526 b31c8676 Iustin Pop
2527 b31c8676 Iustin Pop
  def ExpandNames(self):
2528 b31c8676 Iustin Pop
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
2529 b31c8676 Iustin Pop
2530 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
2531 b31c8676 Iustin Pop
    """Build hooks env.
2532 b31c8676 Iustin Pop

2533 b31c8676 Iustin Pop
    This runs on the master node.
2534 b31c8676 Iustin Pop

2535 b31c8676 Iustin Pop
    """
2536 b31c8676 Iustin Pop
    env = {
2537 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
2538 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
2539 3a5ba66a Iustin Pop
      "OFFLINE": str(self.op.offline),
2540 c9d443ea Iustin Pop
      "DRAINED": str(self.op.drained),
2541 b31c8676 Iustin Pop
      }
2542 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
2543 b31c8676 Iustin Pop
          self.op.node_name]
2544 b31c8676 Iustin Pop
    return env, nl, nl
2545 b31c8676 Iustin Pop
2546 b31c8676 Iustin Pop
  def CheckPrereq(self):
2547 b31c8676 Iustin Pop
    """Check prerequisites.
2548 b31c8676 Iustin Pop

2549 b31c8676 Iustin Pop
    This checks that the requested state change is valid for the node.
2550 b31c8676 Iustin Pop

2551 b31c8676 Iustin Pop
    """
2552 3a5ba66a Iustin Pop
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
2553 b31c8676 Iustin Pop
2554 c9d443ea Iustin Pop
    if ((self.op.master_candidate == False or self.op.offline == True or
2555 c9d443ea Iustin Pop
         self.op.drained == True) and node.master_candidate):
2556 3a5ba66a Iustin Pop
      # we will demote the node from master_candidate
2557 3a26773f Iustin Pop
      if self.op.node_name == self.cfg.GetMasterNode():
2558 3a26773f Iustin Pop
        raise errors.OpPrereqError("The master node has to be a"
2559 c9d443ea Iustin Pop
                                   " master candidate, online and not drained")
2560 3e83dd48 Iustin Pop
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2561 3a5ba66a Iustin Pop
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
2562 3e83dd48 Iustin Pop
      if num_candidates <= cp_size:
2563 3e83dd48 Iustin Pop
        msg = ("Not enough master candidates (desired"
2564 3e83dd48 Iustin Pop
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
2565 3a5ba66a Iustin Pop
        if self.op.force:
2566 3e83dd48 Iustin Pop
          self.LogWarning(msg)
2567 3e83dd48 Iustin Pop
        else:
2568 3e83dd48 Iustin Pop
          raise errors.OpPrereqError(msg)
2569 3e83dd48 Iustin Pop
2570 c9d443ea Iustin Pop
    if (self.op.master_candidate == True and
2571 c9d443ea Iustin Pop
        ((node.offline and not self.op.offline == False) or
2572 c9d443ea Iustin Pop
         (node.drained and not self.op.drained == False))):
2573 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
2574 949bdabe Iustin Pop
                                 " to master_candidate" % node.name)
2575 3a5ba66a Iustin Pop
2576 b31c8676 Iustin Pop
    return
2577 b31c8676 Iustin Pop
2578 b31c8676 Iustin Pop
  def Exec(self, feedback_fn):
2579 b31c8676 Iustin Pop
    """Modifies a node.
2580 b31c8676 Iustin Pop

2581 b31c8676 Iustin Pop
    """
2582 3a5ba66a Iustin Pop
    node = self.node
2583 b31c8676 Iustin Pop
2584 b31c8676 Iustin Pop
    result = []
2585 c9d443ea Iustin Pop
    changed_mc = False
2586 b31c8676 Iustin Pop
2587 3a5ba66a Iustin Pop
    if self.op.offline is not None:
2588 3a5ba66a Iustin Pop
      node.offline = self.op.offline
2589 3a5ba66a Iustin Pop
      result.append(("offline", str(self.op.offline)))
2590 c9d443ea Iustin Pop
      if self.op.offline == True:
2591 c9d443ea Iustin Pop
        if node.master_candidate:
2592 c9d443ea Iustin Pop
          node.master_candidate = False
2593 c9d443ea Iustin Pop
          changed_mc = True
2594 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to offline"))
2595 c9d443ea Iustin Pop
        if node.drained:
2596 c9d443ea Iustin Pop
          node.drained = False
2597 c9d443ea Iustin Pop
          result.append(("drained", "clear drained status due to offline"))
2598 3a5ba66a Iustin Pop
2599 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
2600 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
2601 c9d443ea Iustin Pop
      changed_mc = True
2602 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
2603 56aa9fd5 Iustin Pop
      if self.op.master_candidate == False:
2604 56aa9fd5 Iustin Pop
        rrc = self.rpc.call_node_demote_from_mc(node.name)
2605 4c4e4e1e Iustin Pop
        msg = rrc.fail_msg
2606 0959c824 Iustin Pop
        if msg:
2607 0959c824 Iustin Pop
          self.LogWarning("Node failed to demote itself: %s" % msg)
2608 b31c8676 Iustin Pop
2609 c9d443ea Iustin Pop
    if self.op.drained is not None:
2610 c9d443ea Iustin Pop
      node.drained = self.op.drained
2611 82e12743 Iustin Pop
      result.append(("drained", str(self.op.drained)))
2612 c9d443ea Iustin Pop
      if self.op.drained == True:
2613 c9d443ea Iustin Pop
        if node.master_candidate:
2614 c9d443ea Iustin Pop
          node.master_candidate = False
2615 c9d443ea Iustin Pop
          changed_mc = True
2616 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to drain"))
2617 dec0d9da Iustin Pop
          rrc = self.rpc.call_node_demote_from_mc(node.name)
2618 dec0d9da Iustin Pop
          msg = rrc.fail_msg
2619 dec0d9da Iustin Pop
          if msg:
2620 dec0d9da Iustin Pop
            self.LogWarning("Node failed to demote itself: %s" % msg)
2621 c9d443ea Iustin Pop
        if node.offline:
2622 c9d443ea Iustin Pop
          node.offline = False
2623 c9d443ea Iustin Pop
          result.append(("offline", "clear offline status due to drain"))
2624 c9d443ea Iustin Pop
2625 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
2626 b31c8676 Iustin Pop
    self.cfg.Update(node)
2627 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
2628 c9d443ea Iustin Pop
    if changed_mc:
2629 3a26773f Iustin Pop
      self.context.ReaddNode(node)
2630 b31c8676 Iustin Pop
2631 b31c8676 Iustin Pop
    return result
2632 b31c8676 Iustin Pop
2633 b31c8676 Iustin Pop
2634 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
2635 f5118ade Iustin Pop
  """Powercycles a node.
2636 f5118ade Iustin Pop

2637 f5118ade Iustin Pop
  """
2638 f5118ade Iustin Pop
  _OP_REQP = ["node_name", "force"]
2639 f5118ade Iustin Pop
  REQ_BGL = False
2640 f5118ade Iustin Pop
2641 f5118ade Iustin Pop
  def CheckArguments(self):
2642 f5118ade Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2643 f5118ade Iustin Pop
    if node_name is None:
2644 f5118ade Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2645 f5118ade Iustin Pop
    self.op.node_name = node_name
2646 f5118ade Iustin Pop
    if node_name == self.cfg.GetMasterNode() and not self.op.force:
2647 f5118ade Iustin Pop
      raise errors.OpPrereqError("The node is the master and the force"
2648 f5118ade Iustin Pop
                                 " parameter was not set")
2649 f5118ade Iustin Pop
2650 f5118ade Iustin Pop
  def ExpandNames(self):
2651 f5118ade Iustin Pop
    """Locking for PowercycleNode.
2652 f5118ade Iustin Pop

2653 f5118ade Iustin Pop
    This is a last-resort option and shouldn't block on other
2654 f5118ade Iustin Pop
    jobs. Therefore, we grab no locks.
2655 f5118ade Iustin Pop

2656 f5118ade Iustin Pop
    """
2657 f5118ade Iustin Pop
    self.needed_locks = {}
2658 f5118ade Iustin Pop
2659 f5118ade Iustin Pop
  def CheckPrereq(self):
2660 f5118ade Iustin Pop
    """Check prerequisites.
2661 f5118ade Iustin Pop

2662 f5118ade Iustin Pop
    This LU has no prereqs.
2663 f5118ade Iustin Pop

2664 f5118ade Iustin Pop
    """
2665 f5118ade Iustin Pop
    pass
2666 f5118ade Iustin Pop
2667 f5118ade Iustin Pop
  def Exec(self, feedback_fn):
2668 f5118ade Iustin Pop
    """Reboots a node.
2669 f5118ade Iustin Pop

2670 f5118ade Iustin Pop
    """
2671 f5118ade Iustin Pop
    result = self.rpc.call_node_powercycle(self.op.node_name,
2672 f5118ade Iustin Pop
                                           self.cfg.GetHypervisorType())
2673 4c4e4e1e Iustin Pop
    result.Raise("Failed to schedule the reboot")
2674 f5118ade Iustin Pop
    return result.payload
2675 f5118ade Iustin Pop
2676 f5118ade Iustin Pop
2677 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
2678 a8083063 Iustin Pop
  """Query cluster configuration.
2679 a8083063 Iustin Pop

2680 a8083063 Iustin Pop
  """
2681 a8083063 Iustin Pop
  _OP_REQP = []
2682 642339cf Guido Trotter
  REQ_BGL = False
2683 642339cf Guido Trotter
2684 642339cf Guido Trotter
  def ExpandNames(self):
2685 642339cf Guido Trotter
    self.needed_locks = {}
2686 a8083063 Iustin Pop
2687 a8083063 Iustin Pop
  def CheckPrereq(self):
2688 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
2689 a8083063 Iustin Pop

2690 a8083063 Iustin Pop
    """
2691 a8083063 Iustin Pop
    pass
2692 a8083063 Iustin Pop
2693 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2694 a8083063 Iustin Pop
    """Return cluster config.
2695 a8083063 Iustin Pop

2696 a8083063 Iustin Pop
    """
2697 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
2698 a8083063 Iustin Pop
    result = {
2699 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
2700 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
2701 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
2702 d1a7d66f Guido Trotter
      "os_api_version": max(constants.OS_API_VERSIONS),
2703 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
2704 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
2705 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
2706 469f88e1 Iustin Pop
      "master": cluster.master_node,
2707 066f465d Guido Trotter
      "default_hypervisor": cluster.enabled_hypervisors[0],
2708 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
2709 b8810fec Michael Hanselmann
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
2710 7c4d6c7b Michael Hanselmann
                        for hypervisor_name in cluster.enabled_hypervisors]),
2711 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
2712 1094acda Guido Trotter
      "nicparams": cluster.nicparams,
2713 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
2714 7a56b411 Guido Trotter
      "master_netdev": cluster.master_netdev,
2715 7a56b411 Guido Trotter
      "volume_group_name": cluster.volume_group_name,
2716 7a56b411 Guido Trotter
      "file_storage_dir": cluster.file_storage_dir,
2717 a8083063 Iustin Pop
      }
2718 a8083063 Iustin Pop
2719 a8083063 Iustin Pop
    return result
2720 a8083063 Iustin Pop
2721 a8083063 Iustin Pop
2722 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
2723 ae5849b5 Michael Hanselmann
  """Return configuration values.
2724 a8083063 Iustin Pop

2725 a8083063 Iustin Pop
  """
2726 a8083063 Iustin Pop
  _OP_REQP = []
2727 642339cf Guido Trotter
  REQ_BGL = False
2728 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
2729 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")
2730 642339cf Guido Trotter
2731 642339cf Guido Trotter
  def ExpandNames(self):
2732 642339cf Guido Trotter
    self.needed_locks = {}
2733 a8083063 Iustin Pop
2734 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2735 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2736 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
2737 ae5849b5 Michael Hanselmann
2738 a8083063 Iustin Pop
  def CheckPrereq(self):
2739 a8083063 Iustin Pop
    """No prerequisites.
2740 a8083063 Iustin Pop

2741 a8083063 Iustin Pop
    """
2742 a8083063 Iustin Pop
    pass
2743 a8083063 Iustin Pop
2744 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2745 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
2746 a8083063 Iustin Pop

2747 a8083063 Iustin Pop
    """
2748 ae5849b5 Michael Hanselmann
    values = []
2749 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
2750 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
2751 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
2752 ae5849b5 Michael Hanselmann
      elif field == "master_node":
2753 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
2754 3ccafd0e Iustin Pop
      elif field == "drain_flag":
2755 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2756 ae5849b5 Michael Hanselmann
      else:
2757 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
2758 3ccafd0e Iustin Pop
      values.append(entry)
2759 ae5849b5 Michael Hanselmann
    return values
2760 a8083063 Iustin Pop
2761 a8083063 Iustin Pop
2762 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
2763 a8083063 Iustin Pop
  """Bring up an instance's disks.
2764 a8083063 Iustin Pop

2765 a8083063 Iustin Pop
  """
2766 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2767 f22a8ba3 Guido Trotter
  REQ_BGL = False
2768 f22a8ba3 Guido Trotter
2769 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2770 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2771 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2772 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2773 f22a8ba3 Guido Trotter
2774 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2775 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2776 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2777 a8083063 Iustin Pop
2778 a8083063 Iustin Pop
  def CheckPrereq(self):
2779 a8083063 Iustin Pop
    """Check prerequisites.
2780 a8083063 Iustin Pop

2781 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2782 a8083063 Iustin Pop

2783 a8083063 Iustin Pop
    """
2784 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2785 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2786 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2787 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
2788 a8083063 Iustin Pop
2789 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2790 a8083063 Iustin Pop
    """Activate the disks.
2791 a8083063 Iustin Pop

2792 a8083063 Iustin Pop
    """
2793 b9bddb6b Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2794 a8083063 Iustin Pop
    if not disks_ok:
2795 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
2796 a8083063 Iustin Pop
2797 a8083063 Iustin Pop
    return disks_info
2798 a8083063 Iustin Pop
2799 a8083063 Iustin Pop
2800 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2801 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
2802 a8083063 Iustin Pop

2803 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
2804 a8083063 Iustin Pop

2805 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
2806 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
2807 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
2808 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
2809 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
2810 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
2811 e4376078 Iustin Pop
      won't result in an error return from the function
2812 e4376078 Iustin Pop
  @return: a tuple of (disks_ok, device_info); device_info is a list of
2813 e4376078 Iustin Pop
      (host, instance_visible_name, node_visible_name)
2814 e4376078 Iustin Pop
      with the mapping from node devices to instance devices
2815 a8083063 Iustin Pop

2816 a8083063 Iustin Pop
  """
2817 a8083063 Iustin Pop
  device_info = []
2818 a8083063 Iustin Pop
  disks_ok = True
2819 fdbd668d Iustin Pop
  iname = instance.name
2820 fdbd668d Iustin Pop
  # With the two-pass mechanism we try to reduce the window of
2821 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
2822 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
2823 fdbd668d Iustin Pop
2824 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
2825 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
2826 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
2827 fdbd668d Iustin Pop
  # SyncSource, etc.)
2828 fdbd668d Iustin Pop
2829 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
2830 a8083063 Iustin Pop
  for inst_disk in instance.disks:
2831 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2832 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2833 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2834 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2835 53c14ef1 Iustin Pop
      if msg:
2836 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2837 53c14ef1 Iustin Pop
                           " (is_primary=False, pass=1): %s",
2838 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
2839 fdbd668d Iustin Pop
        if not ignore_secondaries:
2840 a8083063 Iustin Pop
          disks_ok = False
2841 fdbd668d Iustin Pop
2842 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
2843 fdbd668d Iustin Pop
2844 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
2845 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
2846 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2847 fdbd668d Iustin Pop
      if node != instance.primary_node:
2848 fdbd668d Iustin Pop
        continue
2849 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2850 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2851 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2852 53c14ef1 Iustin Pop
      if msg:
2853 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2854 53c14ef1 Iustin Pop
                           " (is_primary=True, pass=2): %s",
2855 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
2856 fdbd668d Iustin Pop
        disks_ok = False
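    # 'result' still holds the assemble RPC answer for the primary node from
    # the inner loop above; its payload is presumably the device path there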
2857 1dff8e07 Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name,
2858 1dff8e07 Iustin Pop
                        result.payload))
2859 a8083063 Iustin Pop
2860 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
2861 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
2862 b352ab5b Iustin Pop
  # improving the logical/physical id handling
2863 b352ab5b Iustin Pop
  for disk in instance.disks:
2864 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
2865 b352ab5b Iustin Pop
2866 a8083063 Iustin Pop
  return disks_ok, device_info
2867 a8083063 Iustin Pop
2868 a8083063 Iustin Pop
2869 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
2870 3ecf6786 Iustin Pop
  """Start the disks of an instance.
2871 3ecf6786 Iustin Pop

2872 3ecf6786 Iustin Pop
  """
2873 7c4d6c7b Michael Hanselmann
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
2874 fe7b0351 Michael Hanselmann
                                       ignore_secondaries=force)
2875 fe7b0351 Michael Hanselmann
  if not disks_ok:
2876 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
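    # force is None when we are called from code paths that expose no --force
    # option (e.g. reinstall/rename); only hint at --force for real requests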
2877 fe7b0351 Michael Hanselmann
    if force is not None and not force:
2878 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
2879 86d9d3bb Iustin Pop
                         " secondary node,"
2880 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
2881 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
2882 fe7b0351 Michael Hanselmann
2883 fe7b0351 Michael Hanselmann
2884 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
2885 a8083063 Iustin Pop
  """Shutdown an instance's disks.
2886 a8083063 Iustin Pop

2887 a8083063 Iustin Pop
  """
2888 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2889 f22a8ba3 Guido Trotter
  REQ_BGL = False
2890 f22a8ba3 Guido Trotter
2891 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2892 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2893 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2894 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2895 f22a8ba3 Guido Trotter
2896 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2897 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2898 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2899 a8083063 Iustin Pop
2900 a8083063 Iustin Pop
  def CheckPrereq(self):
2901 a8083063 Iustin Pop
    """Check prerequisites.
2902 a8083063 Iustin Pop

2903 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2904 a8083063 Iustin Pop

2905 a8083063 Iustin Pop
    """
2906 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2907 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2908 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2909 a8083063 Iustin Pop
2910 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2911 a8083063 Iustin Pop
    """Deactivate the disks
2912 a8083063 Iustin Pop

2913 a8083063 Iustin Pop
    """
2914 a8083063 Iustin Pop
    instance = self.instance
2915 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
2916 a8083063 Iustin Pop
2917 a8083063 Iustin Pop
2918 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
2919 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
2920 155d6c75 Guido Trotter

2921 155d6c75 Guido Trotter
  This function checks if an instance is running, before calling
2922 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
2923 155d6c75 Guido Trotter

2924 155d6c75 Guido Trotter
  """
2925 aca13712 Iustin Pop
  pnode = instance.primary_node
2926 4c4e4e1e Iustin Pop
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
2927 4c4e4e1e Iustin Pop
  ins_l.Raise("Can't contact node %s" % pnode)
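  # ins_l.payload is the list of instance names reported running on pnode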
2928 aca13712 Iustin Pop
2929 aca13712 Iustin Pop
  if instance.name in ins_l.payload:
2930 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
2931 155d6c75 Guido Trotter
                             " block devices.")
2932 155d6c75 Guido Trotter
2933 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
2934 a8083063 Iustin Pop
2935 a8083063 Iustin Pop
2936 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2937 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2938 a8083063 Iustin Pop

2939 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2940 a8083063 Iustin Pop

2941 a8083063 Iustin Pop
  If ignore_primary is false, errors on the primary node are not
2942 a8083063 Iustin Pop
  ignored; only when it is true are primary-node errors tolerated.
2943 a8083063 Iustin Pop

2944 a8083063 Iustin Pop
  """
2945 cacfd1fd Iustin Pop
  all_result = True
2946 a8083063 Iustin Pop
  for disk in instance.disks:
2947 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2948 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
2949 781de953 Iustin Pop
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
2950 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2951 cacfd1fd Iustin Pop
      if msg:
2952 cacfd1fd Iustin Pop
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
2953 cacfd1fd Iustin Pop
                      disk.iv_name, node, msg)
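        # a failure anywhere is fatal, except on the primary node when
        # ignore_primary is set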
2954 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2955 cacfd1fd Iustin Pop
          all_result = False
2956 cacfd1fd Iustin Pop
  return all_result
2957 a8083063 Iustin Pop
2958 a8083063 Iustin Pop
2959 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
2960 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2961 d4f16fd9 Iustin Pop

2962 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
2963 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2964 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
2965 d4f16fd9 Iustin Pop
  exception.
2966 d4f16fd9 Iustin Pop

2967 b9bddb6b Iustin Pop
  @type lu: L{LogicalUnit}
2968 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2969 e69d05fd Iustin Pop
  @type node: C{str}
2970 e69d05fd Iustin Pop
  @param node: the node to check
2971 e69d05fd Iustin Pop
  @type reason: C{str}
2972 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2973 e69d05fd Iustin Pop
  @type requested: C{int}
2974 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2975 9ca87a96 Iustin Pop
  @type hypervisor_name: C{str}
2976 9ca87a96 Iustin Pop
  @param hypervisor_name: the hypervisor to ask for memory stats
2977 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2978 e69d05fd Iustin Pop
      we cannot check the node
2979 d4f16fd9 Iustin Pop

2980 d4f16fd9 Iustin Pop
  """
2981 9ca87a96 Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
2982 4c4e4e1e Iustin Pop
  nodeinfo[node].Raise("Can't get data from node %s" % node, prereq=True)
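  # the payload is the node information dict; 'memory_free' should be in
  # MiB, the same unit as the 'requested' argument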
2983 070e998b Iustin Pop
  free_mem = nodeinfo[node].payload.get('memory_free', None)
2984 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2985 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2986 070e998b Iustin Pop
                               " was '%s'" % (node, free_mem))
2987 d4f16fd9 Iustin Pop
  if requested > free_mem:
2988 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2989 070e998b Iustin Pop
                               " needed %s MiB, available %s MiB" %
2990 070e998b Iustin Pop
                               (node, reason, requested, free_mem))
2991 d4f16fd9 Iustin Pop
2992 d4f16fd9 Iustin Pop
2993 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2994 a8083063 Iustin Pop
  """Starts an instance.
2995 a8083063 Iustin Pop

2996 a8083063 Iustin Pop
  """
2997 a8083063 Iustin Pop
  HPATH = "instance-start"
2998 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2999 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
3000 e873317a Guido Trotter
  REQ_BGL = False
3001 e873317a Guido Trotter
3002 e873317a Guido Trotter
  def ExpandNames(self):
3003 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3004 a8083063 Iustin Pop
3005 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3006 a8083063 Iustin Pop
    """Build hooks env.
3007 a8083063 Iustin Pop

3008 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3009 a8083063 Iustin Pop

3010 a8083063 Iustin Pop
    """
3011 a8083063 Iustin Pop
    env = {
3012 a8083063 Iustin Pop
      "FORCE": self.op.force,
3013 a8083063 Iustin Pop
      }
3014 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3015 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3016 a8083063 Iustin Pop
    return env, nl, nl
3017 a8083063 Iustin Pop
3018 a8083063 Iustin Pop
  def CheckPrereq(self):
3019 a8083063 Iustin Pop
    """Check prerequisites.
3020 a8083063 Iustin Pop

3021 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3022 a8083063 Iustin Pop

3023 a8083063 Iustin Pop
    """
3024 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3025 e873317a Guido Trotter
    assert self.instance is not None, \
3026 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3027 a8083063 Iustin Pop
3028 d04aaa2f Iustin Pop
    # extra beparams
3029 d04aaa2f Iustin Pop
    self.beparams = getattr(self.op, "beparams", {})
3030 d04aaa2f Iustin Pop
    if self.beparams:
3031 d04aaa2f Iustin Pop
      if not isinstance(self.beparams, dict):
3032 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
3033 d04aaa2f Iustin Pop
                                   " dict" % (type(self.beparams), ))
3034 d04aaa2f Iustin Pop
      # fill the beparams dict
3035 d04aaa2f Iustin Pop
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
3036 d04aaa2f Iustin Pop
      self.op.beparams = self.beparams
3037 d04aaa2f Iustin Pop
3038 d04aaa2f Iustin Pop
    # extra hvparams
3039 d04aaa2f Iustin Pop
    self.hvparams = getattr(self.op, "hvparams", {})
3040 d04aaa2f Iustin Pop
    if self.hvparams:
3041 d04aaa2f Iustin Pop
      if not isinstance(self.hvparams, dict):
3042 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
3043 d04aaa2f Iustin Pop
                                   " dict" % (type(self.hvparams), ))
3044 d04aaa2f Iustin Pop
3045 d04aaa2f Iustin Pop
      # check hypervisor parameter syntax (locally)
3046 d04aaa2f Iustin Pop
      cluster = self.cfg.GetClusterInfo()
3047 d04aaa2f Iustin Pop
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
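      # build the effective parameters: cluster defaults, overridden by the
      # instance's own hvparams, overridden by the per-startup values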
3048 abe609b2 Guido Trotter
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
3049 d04aaa2f Iustin Pop
                                    instance.hvparams)
3050 d04aaa2f Iustin Pop
      filled_hvp.update(self.hvparams)
3051 d04aaa2f Iustin Pop
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
3052 d04aaa2f Iustin Pop
      hv_type.CheckParameterSyntax(filled_hvp)
3053 d04aaa2f Iustin Pop
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
3054 d04aaa2f Iustin Pop
      self.op.hvparams = self.hvparams
3055 d04aaa2f Iustin Pop
3056 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3057 7527a8a4 Iustin Pop
3058 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
3059 5bbd3f7f Michael Hanselmann
    # check bridges existence
3060 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
3061 a8083063 Iustin Pop
3062 f1926756 Guido Trotter
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3063 f1926756 Guido Trotter
                                              instance.name,
3064 f1926756 Guido Trotter
                                              instance.hypervisor)
3065 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3066 4c4e4e1e Iustin Pop
                      prereq=True)
3067 7ad1af4a Iustin Pop
    if not remote_info.payload: # not running already
3068 f1926756 Guido Trotter
      _CheckNodeFreeMemory(self, instance.primary_node,
3069 f1926756 Guido Trotter
                           "starting instance %s" % instance.name,
3070 f1926756 Guido Trotter
                           bep[constants.BE_MEMORY], instance.hypervisor)
3071 d4f16fd9 Iustin Pop
3072 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3073 a8083063 Iustin Pop
    """Start the instance.
3074 a8083063 Iustin Pop

3075 a8083063 Iustin Pop
    """
3076 a8083063 Iustin Pop
    instance = self.instance
3077 a8083063 Iustin Pop
    force = self.op.force
3078 a8083063 Iustin Pop
3079 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
3080 fe482621 Iustin Pop
3081 a8083063 Iustin Pop
    node_current = instance.primary_node
3082 a8083063 Iustin Pop
3083 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
3084 a8083063 Iustin Pop
3085 d04aaa2f Iustin Pop
    result = self.rpc.call_instance_start(node_current, instance,
3086 d04aaa2f Iustin Pop
                                          self.hvparams, self.beparams)
3087 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3088 dd279568 Iustin Pop
    if msg:
3089 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3090 dd279568 Iustin Pop
      raise errors.OpExecError("Could not start instance: %s" % msg)
3091 a8083063 Iustin Pop
3092 a8083063 Iustin Pop
3093 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
3094 bf6929a2 Alexander Schreiber
  """Reboot an instance.
3095 bf6929a2 Alexander Schreiber

3096 bf6929a2 Alexander Schreiber
  """
3097 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
3098 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
3099 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
3100 e873317a Guido Trotter
  REQ_BGL = False
3101 e873317a Guido Trotter
3102 e873317a Guido Trotter
  def ExpandNames(self):
3103 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
3104 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
3105 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
3106 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
3107 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
3108 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
3109 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
3110 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3111 bf6929a2 Alexander Schreiber
3112 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
3113 bf6929a2 Alexander Schreiber
    """Build hooks env.
3114 bf6929a2 Alexander Schreiber

3115 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
3116 bf6929a2 Alexander Schreiber

3117 bf6929a2 Alexander Schreiber
    """
3118 bf6929a2 Alexander Schreiber
    env = {
3119 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
3120 2c2690c9 Iustin Pop
      "REBOOT_TYPE": self.op.reboot_type,
3121 bf6929a2 Alexander Schreiber
      }
3122 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3123 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3124 bf6929a2 Alexander Schreiber
    return env, nl, nl
3125 bf6929a2 Alexander Schreiber
3126 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
3127 bf6929a2 Alexander Schreiber
    """Check prerequisites.
3128 bf6929a2 Alexander Schreiber

3129 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
3130 bf6929a2 Alexander Schreiber

3131 bf6929a2 Alexander Schreiber
    """
3132 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3133 e873317a Guido Trotter
    assert self.instance is not None, \
3134 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3135 bf6929a2 Alexander Schreiber
3136 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3137 7527a8a4 Iustin Pop
3138 5bbd3f7f Michael Hanselmann
    # check bridges existence
3139 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
3140 bf6929a2 Alexander Schreiber
3141 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
3142 bf6929a2 Alexander Schreiber
    """Reboot the instance.
3143 bf6929a2 Alexander Schreiber

3144 bf6929a2 Alexander Schreiber
    """
3145 bf6929a2 Alexander Schreiber
    instance = self.instance
3146 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
3147 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
3148 bf6929a2 Alexander Schreiber
3149 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
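    # soft and hard reboots are delegated to the hypervisor on the primary
    # node; a full reboot is emulated via shutdown, disk restart and startup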
3150 bf6929a2 Alexander Schreiber
3151 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
3152 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
3153 ae48ac32 Iustin Pop
      for disk in instance.disks:
3154 ae48ac32 Iustin Pop
        self.cfg.SetDiskID(disk, node_current)
3155 781de953 Iustin Pop
      result = self.rpc.call_instance_reboot(node_current, instance,
3156 07813a9e Iustin Pop
                                             reboot_type)
3157 4c4e4e1e Iustin Pop
      result.Raise("Could not reboot instance")
3158 bf6929a2 Alexander Schreiber
    else:
3159 1fae010f Iustin Pop
      result = self.rpc.call_instance_shutdown(node_current, instance)
3160 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance for full reboot")
3161 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3162 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
3163 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(node_current, instance, None, None)
3164 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3165 dd279568 Iustin Pop
      if msg:
3166 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3167 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance for"
3168 dd279568 Iustin Pop
                                 " full reboot: %s" % msg)
3169 bf6929a2 Alexander Schreiber
3170 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
3171 bf6929a2 Alexander Schreiber
3172 bf6929a2 Alexander Schreiber
3173 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
3174 a8083063 Iustin Pop
  """Shutdown an instance.
3175 a8083063 Iustin Pop

3176 a8083063 Iustin Pop
  """
3177 a8083063 Iustin Pop
  HPATH = "instance-stop"
3178 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3179 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3180 e873317a Guido Trotter
  REQ_BGL = False
3181 e873317a Guido Trotter
3182 e873317a Guido Trotter
  def ExpandNames(self):
3183 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3184 a8083063 Iustin Pop
3185 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3186 a8083063 Iustin Pop
    """Build hooks env.
3187 a8083063 Iustin Pop

3188 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3189 a8083063 Iustin Pop

3190 a8083063 Iustin Pop
    """
3191 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3192 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3193 a8083063 Iustin Pop
    return env, nl, nl
3194 a8083063 Iustin Pop
3195 a8083063 Iustin Pop
  def CheckPrereq(self):
3196 a8083063 Iustin Pop
    """Check prerequisites.
3197 a8083063 Iustin Pop

3198 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3199 a8083063 Iustin Pop

3200 a8083063 Iustin Pop
    """
3201 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3202 e873317a Guido Trotter
    assert self.instance is not None, \
3203 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3204 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3205 a8083063 Iustin Pop
3206 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3207 a8083063 Iustin Pop
    """Shutdown the instance.
3208 a8083063 Iustin Pop

3209 a8083063 Iustin Pop
    """
3210 a8083063 Iustin Pop
    instance = self.instance
3211 a8083063 Iustin Pop
    node_current = instance.primary_node
3212 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
3213 781de953 Iustin Pop
    result = self.rpc.call_instance_shutdown(node_current, instance)
3214 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3215 1fae010f Iustin Pop
    if msg:
3216 1fae010f Iustin Pop
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
3217 a8083063 Iustin Pop
3218 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
3219 a8083063 Iustin Pop
3220 a8083063 Iustin Pop
3221 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
3222 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
3223 fe7b0351 Michael Hanselmann

3224 fe7b0351 Michael Hanselmann
  """
3225 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
3226 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
3227 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
3228 4e0b4d2d Guido Trotter
  REQ_BGL = False
3229 4e0b4d2d Guido Trotter
3230 4e0b4d2d Guido Trotter
  def ExpandNames(self):
3231 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
3232 fe7b0351 Michael Hanselmann
3233 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
3234 fe7b0351 Michael Hanselmann
    """Build hooks env.
3235 fe7b0351 Michael Hanselmann

3236 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
3237 fe7b0351 Michael Hanselmann

3238 fe7b0351 Michael Hanselmann
    """
3239 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3240 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3241 fe7b0351 Michael Hanselmann
    return env, nl, nl
3242 fe7b0351 Michael Hanselmann
3243 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
3244 fe7b0351 Michael Hanselmann
    """Check prerequisites.
3245 fe7b0351 Michael Hanselmann

3246 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
3247 fe7b0351 Michael Hanselmann

3248 fe7b0351 Michael Hanselmann
    """
3249 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3250 4e0b4d2d Guido Trotter
    assert instance is not None, \
3251 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3252 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3253 4e0b4d2d Guido Trotter
3254 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
3255 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
3256 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3257 0d68c45d Iustin Pop
    if instance.admin_up:
3258 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3259 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3260 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3261 72737a7f Iustin Pop
                                              instance.name,
3262 72737a7f Iustin Pop
                                              instance.hypervisor)
3263 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3264 4c4e4e1e Iustin Pop
                      prereq=True)
3265 7ad1af4a Iustin Pop
    if remote_info.payload:
3266 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3267 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
3268 3ecf6786 Iustin Pop
                                  instance.primary_node))
3269 d0834de3 Michael Hanselmann
3270 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
3271 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3272 d0834de3 Michael Hanselmann
      # OS verification
3273 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
3274 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
3275 d0834de3 Michael Hanselmann
      if pnode is None:
3276 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
3277 3ecf6786 Iustin Pop
                                   self.op.pnode)
3278 781de953 Iustin Pop
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
3279 4c4e4e1e Iustin Pop
      result.Raise("OS '%s' not in supported OS list for primary node %s" %
3280 4c4e4e1e Iustin Pop
                   (self.op.os_type, pnode.name), prereq=True)
3281 d0834de3 Michael Hanselmann
3282 fe7b0351 Michael Hanselmann
    self.instance = instance
3283 fe7b0351 Michael Hanselmann
3284 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
3285 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
3286 fe7b0351 Michael Hanselmann

3287 fe7b0351 Michael Hanselmann
    """
3288 fe7b0351 Michael Hanselmann
    inst = self.instance
3289 fe7b0351 Michael Hanselmann
3290 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3291 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
3292 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
3293 97abc79f Iustin Pop
      self.cfg.Update(inst)
3294 d0834de3 Michael Hanselmann
3295 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
3296 fe7b0351 Michael Hanselmann
    try:
3297 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
3298 e557bae9 Guido Trotter
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
3299 4c4e4e1e Iustin Pop
      result.Raise("Could not install OS for instance %s on node %s" %
3300 4c4e4e1e Iustin Pop
                   (inst.name, inst.primary_node))
3301 fe7b0351 Michael Hanselmann
    finally:
3302 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
3303 fe7b0351 Michael Hanselmann
3304 fe7b0351 Michael Hanselmann
3305 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
3306 decd5f45 Iustin Pop
  """Rename an instance.
3307 decd5f45 Iustin Pop

3308 decd5f45 Iustin Pop
  """
3309 decd5f45 Iustin Pop
  HPATH = "instance-rename"
3310 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3311 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
3312 decd5f45 Iustin Pop
3313 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
3314 decd5f45 Iustin Pop
    """Build hooks env.
3315 decd5f45 Iustin Pop

3316 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3317 decd5f45 Iustin Pop

3318 decd5f45 Iustin Pop
    """
3319 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3320 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
3321 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3322 decd5f45 Iustin Pop
    return env, nl, nl
3323 decd5f45 Iustin Pop
3324 decd5f45 Iustin Pop
  def CheckPrereq(self):
3325 decd5f45 Iustin Pop
    """Check prerequisites.
3326 decd5f45 Iustin Pop

3327 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
3328 decd5f45 Iustin Pop

3329 decd5f45 Iustin Pop
    """
3330 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3331 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3332 decd5f45 Iustin Pop
    if instance is None:
3333 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3334 decd5f45 Iustin Pop
                                 self.op.instance_name)
3335 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3336 7527a8a4 Iustin Pop
3337 0d68c45d Iustin Pop
    if instance.admin_up:
3338 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3339 decd5f45 Iustin Pop
                                 self.op.instance_name)
3340 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3341 72737a7f Iustin Pop
                                              instance.name,
3342 72737a7f Iustin Pop
                                              instance.hypervisor)
3343 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3344 4c4e4e1e Iustin Pop
                      prereq=True)
3345 7ad1af4a Iustin Pop
    if remote_info.payload:
3346 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3347 decd5f45 Iustin Pop
                                 (self.op.instance_name,
3348 decd5f45 Iustin Pop
                                  instance.primary_node))
3349 decd5f45 Iustin Pop
    self.instance = instance
3350 decd5f45 Iustin Pop
3351 decd5f45 Iustin Pop
    # new name verification
3352 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
3353 decd5f45 Iustin Pop
3354 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
3355 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
3356 7bde3275 Guido Trotter
    if new_name in instance_list:
3357 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3358 c09f363f Manuel Franceschini
                                 new_name)
3359 7bde3275 Guido Trotter
3360 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
3361 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
3362 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3363 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
3364 decd5f45 Iustin Pop
3365 decd5f45 Iustin Pop
3366 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
3367 decd5f45 Iustin Pop
    """Reinstall the instance.
3368 decd5f45 Iustin Pop

3369 decd5f45 Iustin Pop
    """
3370 decd5f45 Iustin Pop
    inst = self.instance
3371 decd5f45 Iustin Pop
    old_name = inst.name
3372 decd5f45 Iustin Pop
3373 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
3374 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3375 b23c4333 Manuel Franceschini
3376 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
3377 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
3378 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
3379 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
3380 decd5f45 Iustin Pop
3381 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
3382 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
3383 decd5f45 Iustin Pop
3384 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
3385 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3386 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
3387 72737a7f Iustin Pop
                                                     old_file_storage_dir,
3388 72737a7f Iustin Pop
                                                     new_file_storage_dir)
3389 4c4e4e1e Iustin Pop
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
3390 4c4e4e1e Iustin Pop
                   " (but the instance has been renamed in Ganeti)" %
3391 4c4e4e1e Iustin Pop
                   (inst.primary_node, old_file_storage_dir,
3392 4c4e4e1e Iustin Pop
                    new_file_storage_dir))
3393 b23c4333 Manuel Franceschini
3394 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
3395 decd5f45 Iustin Pop
    try:
3396 781de953 Iustin Pop
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
3397 781de953 Iustin Pop
                                                 old_name)
3398 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3399 96841384 Iustin Pop
      if msg:
3400 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
3401 96841384 Iustin Pop
               " (but the instance has been renamed in Ganeti): %s" %
3402 96841384 Iustin Pop
               (inst.name, inst.primary_node, msg))
3403 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
3404 decd5f45 Iustin Pop
    finally:
3405 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
3406 decd5f45 Iustin Pop
3407 decd5f45 Iustin Pop
3408 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
3409 a8083063 Iustin Pop
  """Remove an instance.
3410 a8083063 Iustin Pop

3411 a8083063 Iustin Pop
  """
3412 a8083063 Iustin Pop
  HPATH = "instance-remove"
3413 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3414 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
3415 cf472233 Guido Trotter
  REQ_BGL = False
3416 cf472233 Guido Trotter
3417 cf472233 Guido Trotter
  def ExpandNames(self):
3418 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
3419 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3420 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3421 cf472233 Guido Trotter
3422 cf472233 Guido Trotter
  def DeclareLocks(self, level):
3423 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
3424 cf472233 Guido Trotter
      self._LockInstancesNodes()
3425 a8083063 Iustin Pop
3426 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3427 a8083063 Iustin Pop
    """Build hooks env.
3428 a8083063 Iustin Pop

3429 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3430 a8083063 Iustin Pop

3431 a8083063 Iustin Pop
    """
3432 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3433 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
3434 a8083063 Iustin Pop
    return env, nl, nl
3435 a8083063 Iustin Pop
3436 a8083063 Iustin Pop
  def CheckPrereq(self):
3437 a8083063 Iustin Pop
    """Check prerequisites.
3438 a8083063 Iustin Pop

3439 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3440 a8083063 Iustin Pop

3441 a8083063 Iustin Pop
    """
3442 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3443 cf472233 Guido Trotter
    assert self.instance is not None, \
3444 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3445 a8083063 Iustin Pop
3446 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3447 a8083063 Iustin Pop
    """Remove the instance.
3448 a8083063 Iustin Pop

3449 a8083063 Iustin Pop
    """
3450 a8083063 Iustin Pop
    instance = self.instance
3451 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
3452 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
3453 a8083063 Iustin Pop
3454 781de953 Iustin Pop
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
3455 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3456 1fae010f Iustin Pop
    if msg:
3457 1d67656e Iustin Pop
      if self.op.ignore_failures:
3458 1fae010f Iustin Pop
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
3459 1d67656e Iustin Pop
      else:
3460 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
3461 1fae010f Iustin Pop
                                 " node %s: %s" %
3462 1fae010f Iustin Pop
                                 (instance.name, instance.primary_node, msg))
3463 a8083063 Iustin Pop
3464 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
3465 a8083063 Iustin Pop
3466 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
3467 1d67656e Iustin Pop
      if self.op.ignore_failures:
3468 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
3469 1d67656e Iustin Pop
      else:
3470 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
3471 a8083063 Iustin Pop
3472 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
3473 a8083063 Iustin Pop
3474 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
3475 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
3476 a8083063 Iustin Pop
3477 a8083063 Iustin Pop
3478 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
3479 a8083063 Iustin Pop
  """Logical unit for querying instances.
3480 a8083063 Iustin Pop

3481 a8083063 Iustin Pop
  """
3482 ec79568d Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
3483 7eb9d8f7 Guido Trotter
  REQ_BGL = False
3484 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
3485 5b460366 Iustin Pop
                                    "admin_state",
3486 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
3487 638c6349 Guido Trotter
                                    "nic_mode", "nic_link",
3488 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
3489 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
3490 8aec325c Iustin Pop
                                    r"(disk)\.(size)/([0-9]+)",
3491 8aec325c Iustin Pop
                                    r"(disk)\.(sizes)", "disk_usage",
3492 638c6349 Guido Trotter
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
3493 638c6349 Guido Trotter
                                    r"(nic)\.(bridge)/([0-9]+)",
3494 638c6349 Guido Trotter
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
3495 8aec325c Iustin Pop
                                    r"(disk|nic)\.(count)",
3496 a2d2e1a7 Iustin Pop
                                    "serial_no", "hypervisor", "hvparams",] +
3497 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
3498 a2d2e1a7 Iustin Pop
                                   for name in constants.HVS_PARAMETERS] +
3499 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
3500 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
3501 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
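  # the regexp-style entries above expand to per-index fields, e.g.
  # "disk.size/0" or "nic.mac/1", handled in Exec via the match groups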
3502 31bf511f Iustin Pop
3503 a8083063 Iustin Pop
3504 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
3505 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3506 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3507 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
3508 a8083063 Iustin Pop
3509 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
3510 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
3511 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
3512 7eb9d8f7 Guido Trotter
3513 57a2fb91 Iustin Pop
    if self.op.names:
3514 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
3515 7eb9d8f7 Guido Trotter
    else:
3516 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
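    # node-side (live) data is needed only when non-static fields were
    # requested; locking additionally depends on the use_locking flag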
3517 7eb9d8f7 Guido Trotter
3518 ec79568d Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
3519 ec79568d Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
3520 57a2fb91 Iustin Pop
    if self.do_locking:
3521 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
3522 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
3523 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3524 7eb9d8f7 Guido Trotter
3525 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
3526 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
3527 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
3528 7eb9d8f7 Guido Trotter
3529 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
3530 7eb9d8f7 Guido Trotter
    """Check prerequisites.
3531 7eb9d8f7 Guido Trotter

3532 7eb9d8f7 Guido Trotter
    """
3533 57a2fb91 Iustin Pop
    pass
3534 069dcc86 Iustin Pop
3535 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3536 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
3537 a8083063 Iustin Pop

3538 a8083063 Iustin Pop
    """
3539 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
3540 a7f5dc98 Iustin Pop
    if self.wanted == locking.ALL_SET:
3541 a7f5dc98 Iustin Pop
      # caller didn't specify instance names, so ordering is not important
3542 a7f5dc98 Iustin Pop
      if self.do_locking:
3543 a7f5dc98 Iustin Pop
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
3544 a7f5dc98 Iustin Pop
      else:
3545 a7f5dc98 Iustin Pop
        instance_names = all_info.keys()
3546 a7f5dc98 Iustin Pop
      instance_names = utils.NiceSort(instance_names)
3547 57a2fb91 Iustin Pop
    else:
3548 a7f5dc98 Iustin Pop
      # caller did specify names, so we must keep the ordering
3549 a7f5dc98 Iustin Pop
      if self.do_locking:
3550 a7f5dc98 Iustin Pop
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
3551 a7f5dc98 Iustin Pop
      else:
3552 a7f5dc98 Iustin Pop
        tgt_set = all_info.keys()
3553 a7f5dc98 Iustin Pop
      missing = set(self.wanted).difference(tgt_set)
3554 a7f5dc98 Iustin Pop
      if missing:
3555 a7f5dc98 Iustin Pop
        raise errors.OpExecError("Some instances were removed before"
3556 a7f5dc98 Iustin Pop
                                 " retrieving their data: %s" % missing)
3557 a7f5dc98 Iustin Pop
      instance_names = self.wanted
3558 c1f1cbb2 Iustin Pop
3559 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
3560 a8083063 Iustin Pop
3561 a8083063 Iustin Pop
    # begin data gathering
3562 a8083063 Iustin Pop
3563 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
3564 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
3565 a8083063 Iustin Pop
3566 a8083063 Iustin Pop
    bad_nodes = []
3567 cbfc4681 Iustin Pop
    off_nodes = []
3568 ec79568d Iustin Pop
    if self.do_node_query:
3569 a8083063 Iustin Pop
      live_data = {}
3570 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
3571 a8083063 Iustin Pop
      for name in nodes:
3572 a8083063 Iustin Pop
        result = node_data[name]
3573 cbfc4681 Iustin Pop
        if result.offline:
3574 cbfc4681 Iustin Pop
          # offline nodes will be in both lists
3575 cbfc4681 Iustin Pop
          off_nodes.append(name)
3576 4c4e4e1e Iustin Pop
        if result.failed or result.fail_msg:
3577 a8083063 Iustin Pop
          bad_nodes.append(name)
3578 781de953 Iustin Pop
        else:
3579 2fa74ef4 Iustin Pop
          if result.payload:
3580 2fa74ef4 Iustin Pop
            live_data.update(result.payload)
3581 2fa74ef4 Iustin Pop
          # else no instance is alive
3582 a8083063 Iustin Pop
    else:
3583 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
3584 a8083063 Iustin Pop
3585 a8083063 Iustin Pop
    # end data gathering
3586 a8083063 Iustin Pop
3587 5018a335 Iustin Pop
    HVPREFIX = "hv/"
3588 338e51e8 Iustin Pop
    BEPREFIX = "be/"
3589 a8083063 Iustin Pop
    output = []
3590 638c6349 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
3591 a8083063 Iustin Pop
    for instance in instance_list:
3592 a8083063 Iustin Pop
      iout = []
3593 638c6349 Guido Trotter
      i_hv = cluster.FillHV(instance)
3594 638c6349 Guido Trotter
      i_be = cluster.FillBE(instance)
3595 638c6349 Guido Trotter
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
3596 638c6349 Guido Trotter
                                 nic.nicparams) for nic in instance.nics]
3597 a8083063 Iustin Pop
      for field in self.op.output_fields:
3598 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
3599 a8083063 Iustin Pop
        if field == "name":
3600 a8083063 Iustin Pop
          val = instance.name
3601 a8083063 Iustin Pop
        elif field == "os":
3602 a8083063 Iustin Pop
          val = instance.os
3603 a8083063 Iustin Pop
        elif field == "pnode":
3604 a8083063 Iustin Pop
          val = instance.primary_node
3605 a8083063 Iustin Pop
        elif field == "snodes":
3606 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
3607 a8083063 Iustin Pop
        elif field == "admin_state":
3608 0d68c45d Iustin Pop
          val = instance.admin_up
3609 a8083063 Iustin Pop
        elif field == "oper_state":
3610 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
3611 8a23d2d3 Iustin Pop
            val = None
3612 a8083063 Iustin Pop
          else:
3613 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
3614 d8052456 Iustin Pop
        elif field == "status":
3615 cbfc4681 Iustin Pop
          if instance.primary_node in off_nodes:
3616 cbfc4681 Iustin Pop
            val = "ERROR_nodeoffline"
3617 cbfc4681 Iustin Pop
          elif instance.primary_node in bad_nodes:
3618 d8052456 Iustin Pop
            val = "ERROR_nodedown"
3619 d8052456 Iustin Pop
          else:
3620 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
3621 d8052456 Iustin Pop
            if running:
3622 0d68c45d Iustin Pop
              if instance.admin_up:
3623 d8052456 Iustin Pop
                val = "running"
3624 d8052456 Iustin Pop
              else:
3625 d8052456 Iustin Pop
                val = "ERROR_up"
3626 d8052456 Iustin Pop
            else:
3627 0d68c45d Iustin Pop
              if instance.admin_up:
3628 d8052456 Iustin Pop
                val = "ERROR_down"
3629 d8052456 Iustin Pop
              else:
3630 d8052456 Iustin Pop
                val = "ADMIN_down"
3631 a8083063 Iustin Pop
        elif field == "oper_ram":
3632 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
3633 8a23d2d3 Iustin Pop
            val = None
3634 a8083063 Iustin Pop
          elif instance.name in live_data:
3635 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
3636 a8083063 Iustin Pop
          else:
3637 a8083063 Iustin Pop
            val = "-"
3638 c1ce76bb Iustin Pop
        elif field == "vcpus":
3639 c1ce76bb Iustin Pop
          val = i_be[constants.BE_VCPUS]
3640 a8083063 Iustin Pop
        elif field == "disk_template":
3641 a8083063 Iustin Pop
          val = instance.disk_template
3642 a8083063 Iustin Pop
        elif field == "ip":
3643 39a02558 Guido Trotter
          if instance.nics:
3644 39a02558 Guido Trotter
            val = instance.nics[0].ip
3645 39a02558 Guido Trotter
          else:
3646 39a02558 Guido Trotter
            val = None
3647 638c6349 Guido Trotter
        elif field == "nic_mode":
3648 638c6349 Guido Trotter
          if instance.nics:
3649 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_MODE]
3650 638c6349 Guido Trotter
          else:
3651 638c6349 Guido Trotter
            val = None
3652 638c6349 Guido Trotter
        elif field == "nic_link":
3653 39a02558 Guido Trotter
          if instance.nics:
3654 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
3655 638c6349 Guido Trotter
          else:
3656 638c6349 Guido Trotter
            val = None
3657 638c6349 Guido Trotter
        elif field == "bridge":
3658 638c6349 Guido Trotter
          if (instance.nics and
3659 638c6349 Guido Trotter
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
3660 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
3661 39a02558 Guido Trotter
          else:
3662 39a02558 Guido Trotter
            val = None
3663 a8083063 Iustin Pop
        elif field == "mac":
3664 39a02558 Guido Trotter
          if instance.nics:
3665 39a02558 Guido Trotter
            val = instance.nics[0].mac
3666 39a02558 Guido Trotter
          else:
3667 39a02558 Guido Trotter
            val = None
3668 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
3669 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
3670 ad24e046 Iustin Pop
          try:
3671 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
3672 ad24e046 Iustin Pop
          except errors.OpPrereqError:
3673 8a23d2d3 Iustin Pop
            val = None
3674 024e157f Iustin Pop
        elif field == "disk_usage": # total disk usage per node
3675 024e157f Iustin Pop
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
3676 024e157f Iustin Pop
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
3677 130a6a6f Iustin Pop
        elif field == "tags":
3678 130a6a6f Iustin Pop
          val = list(instance.GetTags())
3679 38d7239a Iustin Pop
        elif field == "serial_no":
3680 38d7239a Iustin Pop
          val = instance.serial_no
3681 5018a335 Iustin Pop
        elif field == "network_port":
3682 5018a335 Iustin Pop
          val = instance.network_port
3683 338e51e8 Iustin Pop
        elif field == "hypervisor":
3684 338e51e8 Iustin Pop
          val = instance.hypervisor
3685 338e51e8 Iustin Pop
        elif field == "hvparams":
3686 338e51e8 Iustin Pop
          val = i_hv
3687 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
3688 5018a335 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
3689 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
3690 338e51e8 Iustin Pop
        elif field == "beparams":
3691 338e51e8 Iustin Pop
          val = i_be
3692 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
3693 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
3694 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
3695 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
3696 71c1af58 Iustin Pop
          # matches a variable list
3697 71c1af58 Iustin Pop
          st_groups = st_match.groups()
3698 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
3699 71c1af58 Iustin Pop
            if st_groups[1] == "count":
3700 71c1af58 Iustin Pop
              val = len(instance.disks)
3701 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
3702 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
3703 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
3704 3e0cea06 Iustin Pop
              try:
3705 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
3706 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
3707 71c1af58 Iustin Pop
                val = None
3708 71c1af58 Iustin Pop
            else:
3709 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
3710 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
3711 71c1af58 Iustin Pop
            if st_groups[1] == "count":
3712 71c1af58 Iustin Pop
              val = len(instance.nics)
3713 41a776da Iustin Pop
            elif st_groups[1] == "macs":
3714 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
3715 41a776da Iustin Pop
            elif st_groups[1] == "ips":
3716 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
3717 638c6349 Guido Trotter
            elif st_groups[1] == "modes":
3718 638c6349 Guido Trotter
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
3719 638c6349 Guido Trotter
            elif st_groups[1] == "links":
3720 638c6349 Guido Trotter
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
3721 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
3722 638c6349 Guido Trotter
              val = []
3723 638c6349 Guido Trotter
              for nicp in i_nicp:
3724 638c6349 Guido Trotter
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3725 638c6349 Guido Trotter
                  val.append(nicp[constants.NIC_LINK])
3726 638c6349 Guido Trotter
                else:
3727 638c6349 Guido Trotter
                  val.append(None)
3728 71c1af58 Iustin Pop
            else:
3729 71c1af58 Iustin Pop
              # index-based item
3730 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
3731 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
3732 71c1af58 Iustin Pop
                val = None
3733 71c1af58 Iustin Pop
              else:
3734 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
3735 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
3736 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
3737 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
3738 638c6349 Guido Trotter
                elif st_groups[1] == "mode":
3739 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_MODE]
3740 638c6349 Guido Trotter
                elif st_groups[1] == "link":
3741 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_LINK]
3742 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
3743 638c6349 Guido Trotter
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
3744 638c6349 Guido Trotter
                  if nic_mode == constants.NIC_MODE_BRIDGED:
3745 638c6349 Guido Trotter
                    val = i_nicp[nic_idx][constants.NIC_LINK]
3746 638c6349 Guido Trotter
                  else:
3747 638c6349 Guido Trotter
                    val = None
3748 71c1af58 Iustin Pop
                else:
3749 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
3750 71c1af58 Iustin Pop
          else:
3751 c1ce76bb Iustin Pop
            assert False, ("Declared but unhandled variable parameter '%s'" %
3752 c1ce76bb Iustin Pop
                           field)
3753 a8083063 Iustin Pop
        else:
3754 c1ce76bb Iustin Pop
          assert False, "Declared but unhandled parameter '%s'" % field
3755 a8083063 Iustin Pop
        iout.append(val)
3756 a8083063 Iustin Pop
      output.append(iout)
3757 a8083063 Iustin Pop
3758 a8083063 Iustin Pop
    return output
3759 a8083063 Iustin Pop
3760 a8083063 Iustin Pop
3761 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
3762 a8083063 Iustin Pop
  """Failover an instance.
3763 a8083063 Iustin Pop

3764 a8083063 Iustin Pop
  """
3765 a8083063 Iustin Pop
  HPATH = "instance-failover"
3766 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3767 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
3768 c9e5c064 Guido Trotter
  REQ_BGL = False
3769 c9e5c064 Guido Trotter
3770 c9e5c064 Guido Trotter
  def ExpandNames(self):
3771 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
3772 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3773 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3774 c9e5c064 Guido Trotter
3775 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
3776 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
3777 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
3778 a8083063 Iustin Pop
3779 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3780 a8083063 Iustin Pop
    """Build hooks env.
3781 a8083063 Iustin Pop

3782 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3783 a8083063 Iustin Pop

3784 a8083063 Iustin Pop
    """
3785 a8083063 Iustin Pop
    env = {
3786 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
3787 a8083063 Iustin Pop
      }
3788 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3789 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3790 a8083063 Iustin Pop
    return env, nl, nl
3791 a8083063 Iustin Pop
3792 a8083063 Iustin Pop
  def CheckPrereq(self):
3793 a8083063 Iustin Pop
    """Check prerequisites.
3794 a8083063 Iustin Pop

3795 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3796 a8083063 Iustin Pop

3797 a8083063 Iustin Pop
    """
3798 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3799 c9e5c064 Guido Trotter
    assert self.instance is not None, \
3800 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3801 a8083063 Iustin Pop
3802 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
3803 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3804 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
3805 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
3806 2a710df1 Michael Hanselmann
3807 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
3808 2a710df1 Michael Hanselmann
    if not secondary_nodes:
3809 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
3810 abdf0113 Iustin Pop
                                   "a mirrored disk template")
3811 2a710df1 Michael Hanselmann
3812 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
3813 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, target_node)
3814 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, target_node)
3815 d27776f0 Iustin Pop
    if instance.admin_up:
3816 d27776f0 Iustin Pop
      # check memory requirements on the secondary node
3817 d27776f0 Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
3818 d27776f0 Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
3819 d27776f0 Iustin Pop
                           instance.hypervisor)
3820 d27776f0 Iustin Pop
    else:
3821 d27776f0 Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
3822 d27776f0 Iustin Pop
                   " instance will not be started")
3823 3a7c308e Guido Trotter
3824 a8083063 Iustin Pop
    # check bridge existence
3825 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
3826 a8083063 Iustin Pop
3827 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3828 a8083063 Iustin Pop
    """Failover an instance.
3829 a8083063 Iustin Pop

3830 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
3831 a8083063 Iustin Pop
    starting it on the secondary.
3832 a8083063 Iustin Pop

3833 a8083063 Iustin Pop
    """
3834 a8083063 Iustin Pop
    instance = self.instance
3835 a8083063 Iustin Pop
3836 a8083063 Iustin Pop
    source_node = instance.primary_node
3837 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
3838 a8083063 Iustin Pop
3839 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
3840 a8083063 Iustin Pop
    for dev in instance.disks:
3841 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
3842 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
3843 0d68c45d Iustin Pop
        if instance.admin_up and not self.op.ignore_consistency:
3844 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
3845 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
3846 a8083063 Iustin Pop
3847 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
3848 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
3849 9a4f63d1 Iustin Pop
                 instance.name, source_node)
3850 a8083063 Iustin Pop
3851 781de953 Iustin Pop
    result = self.rpc.call_instance_shutdown(source_node, instance)
3852 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3853 1fae010f Iustin Pop
    if msg:
3854 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
3855 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
3856 1fae010f Iustin Pop
                             " Proceeding anyway. Please make sure node"
3857 1fae010f Iustin Pop
                             " %s is down. Error details: %s",
3858 1fae010f Iustin Pop
                             instance.name, source_node, source_node, msg)
3859 24a40d57 Iustin Pop
      else:
3860 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
3861 1fae010f Iustin Pop
                                 " node %s: %s" %
3862 1fae010f Iustin Pop
                                 (instance.name, source_node, msg))
3863 a8083063 Iustin Pop
3864 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
3865 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
3866 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
3867 a8083063 Iustin Pop
3868 a8083063 Iustin Pop
    instance.primary_node = target_node
3869 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
3870 b6102dab Guido Trotter
    self.cfg.Update(instance)
3871 a8083063 Iustin Pop
3872 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
3873 0d68c45d Iustin Pop
    if instance.admin_up:
3874 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
3875 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
3876 9a4f63d1 Iustin Pop
                   instance.name, target_node)
3877 12a0cfbe Guido Trotter
3878 7c4d6c7b Michael Hanselmann
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
3879 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
3880 12a0cfbe Guido Trotter
      if not disks_ok:
3881 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3882 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
3883 a8083063 Iustin Pop
3884 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
3885 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
3886 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3887 dd279568 Iustin Pop
      if msg:
3888 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3889 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
3890 dd279568 Iustin Pop
                                 (instance.name, target_node, msg))
3891 a8083063 Iustin Pop
3892 a8083063 Iustin Pop
3893 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
3894 53c776b5 Iustin Pop
  """Migrate an instance.
3895 53c776b5 Iustin Pop

3896 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
3897 53c776b5 Iustin Pop
  which is done with shutdown.
3898 53c776b5 Iustin Pop

3899 53c776b5 Iustin Pop
  """
3900 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
3901 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3902 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
3903 53c776b5 Iustin Pop
3904 53c776b5 Iustin Pop
  REQ_BGL = False
3905 53c776b5 Iustin Pop
3906 53c776b5 Iustin Pop
  def ExpandNames(self):
3907 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
3908 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
3909 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3910 53c776b5 Iustin Pop
3911 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
3912 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
3913 53c776b5 Iustin Pop
      self._LockInstancesNodes()
3914 53c776b5 Iustin Pop
3915 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
3916 53c776b5 Iustin Pop
    """Build hooks env.
3917 53c776b5 Iustin Pop

3918 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3919 53c776b5 Iustin Pop

3920 53c776b5 Iustin Pop
    """
3921 53c776b5 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3922 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
3923 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
3924 53c776b5 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3925 53c776b5 Iustin Pop
    return env, nl, nl
3926 53c776b5 Iustin Pop
3927 53c776b5 Iustin Pop
  def CheckPrereq(self):
3928 53c776b5 Iustin Pop
    """Check prerequisites.
3929 53c776b5 Iustin Pop

3930 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
3931 53c776b5 Iustin Pop

3932 53c776b5 Iustin Pop
    """
3933 53c776b5 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3934 53c776b5 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3935 53c776b5 Iustin Pop
    if instance is None:
3936 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3937 53c776b5 Iustin Pop
                                 self.op.instance_name)
3938 53c776b5 Iustin Pop
3939 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
3940 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3941 53c776b5 Iustin Pop
                                 " drbd8, cannot migrate.")
3942 53c776b5 Iustin Pop
3943 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
3944 53c776b5 Iustin Pop
    if not secondary_nodes:
3945 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
3946 733a2b6a Iustin Pop
                                      " drbd8 disk template")
3947 53c776b5 Iustin Pop
3948 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
3949 53c776b5 Iustin Pop
3950 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
3951 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
3952 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
3953 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
3954 53c776b5 Iustin Pop
                         instance.hypervisor)
3955 53c776b5 Iustin Pop
3956 53c776b5 Iustin Pop
    # check bridge existence
3957 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
3958 53c776b5 Iustin Pop
3959 53c776b5 Iustin Pop
    if not self.op.cleanup:
3960 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
3961 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
3962 53c776b5 Iustin Pop
                                                 instance)
3963 4c4e4e1e Iustin Pop
      result.Raise("Can't migrate, please use failover", prereq=True)
3964 53c776b5 Iustin Pop
3965 53c776b5 Iustin Pop
    self.instance = instance
3966 53c776b5 Iustin Pop
3967 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
3968 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
3969 53c776b5 Iustin Pop

3970 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
3971 53c776b5 Iustin Pop

3972 53c776b5 Iustin Pop
    """
3973 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
3974 53c776b5 Iustin Pop
    all_done = False
3975 53c776b5 Iustin Pop
    while not all_done:
3976 53c776b5 Iustin Pop
      all_done = True
3977 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
3978 53c776b5 Iustin Pop
                                            self.nodes_ip,
3979 53c776b5 Iustin Pop
                                            self.instance.disks)
3980 53c776b5 Iustin Pop
      min_percent = 100
3981 53c776b5 Iustin Pop
      for node, nres in result.items():
3982 4c4e4e1e Iustin Pop
        nres.Raise("Cannot resync disks on node %s" % node)
3983 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
3984 53c776b5 Iustin Pop
        all_done = all_done and node_done
3985 53c776b5 Iustin Pop
        if node_percent is not None:
3986 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
3987 53c776b5 Iustin Pop
      if not all_done:
3988 53c776b5 Iustin Pop
        if min_percent < 100:
3989 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
3990 53c776b5 Iustin Pop
        time.sleep(2)
3991 53c776b5 Iustin Pop
3992 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
3993 53c776b5 Iustin Pop
    """Demote a node to secondary.
3994 53c776b5 Iustin Pop

3995 53c776b5 Iustin Pop
    """
3996 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
3997 53c776b5 Iustin Pop
3998 53c776b5 Iustin Pop
    for dev in self.instance.disks:
3999 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
4000 53c776b5 Iustin Pop
4001 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
4002 53c776b5 Iustin Pop
                                          self.instance.disks)
4003 4c4e4e1e Iustin Pop
    result.Raise("Cannot change disk to secondary on node %s" % node)
4004 53c776b5 Iustin Pop
4005 53c776b5 Iustin Pop
  def _GoStandalone(self):
4006 53c776b5 Iustin Pop
    """Disconnect from the network.
4007 53c776b5 Iustin Pop

4008 53c776b5 Iustin Pop
    """
4009 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
4010 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
4011 53c776b5 Iustin Pop
                                               self.instance.disks)
4012 53c776b5 Iustin Pop
    for node, nres in result.items():
4013 4c4e4e1e Iustin Pop
      nres.Raise("Cannot disconnect disks node %s" % node)
4014 53c776b5 Iustin Pop
4015 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
4016 53c776b5 Iustin Pop
    """Reconnect to the network.
4017 53c776b5 Iustin Pop

4018 53c776b5 Iustin Pop
    """
4019 53c776b5 Iustin Pop
    if multimaster:
4020 53c776b5 Iustin Pop
      msg = "dual-master"
4021 53c776b5 Iustin Pop
    else:
4022 53c776b5 Iustin Pop
      msg = "single-master"
4023 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
4024 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
4025 53c776b5 Iustin Pop
                                           self.instance.disks,
4026 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
4027 53c776b5 Iustin Pop
    for node, nres in result.items():
4028 4c4e4e1e Iustin Pop
      nres.Raise("Cannot change disks config on node %s" % node)
4029 53c776b5 Iustin Pop
4030 53c776b5 Iustin Pop
  def _ExecCleanup(self):
4031 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
4032 53c776b5 Iustin Pop

4033 53c776b5 Iustin Pop
    The cleanup is done by:
4034 53c776b5 Iustin Pop
      - check that the instance is running only on one node
4035 53c776b5 Iustin Pop
        (and update the config if needed)
4036 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
4037 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
4038 53c776b5 Iustin Pop
      - disconnect from the network
4039 53c776b5 Iustin Pop
      - change disks into single-master mode
4040 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
4041 53c776b5 Iustin Pop

4042 53c776b5 Iustin Pop
    """
4043 53c776b5 Iustin Pop
    instance = self.instance
4044 53c776b5 Iustin Pop
    target_node = self.target_node
4045 53c776b5 Iustin Pop
    source_node = self.source_node
4046 53c776b5 Iustin Pop
4047 53c776b5 Iustin Pop
    # check running on only one node
4048 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
4049 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
4050 53c776b5 Iustin Pop
                     " a bad state)")
4051 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
4052 53c776b5 Iustin Pop
    for node, result in ins_l.items():
4053 4c4e4e1e Iustin Pop
      result.Raise("Can't contact node %s" % node)
4054 53c776b5 Iustin Pop
4055 aca13712 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].payload
4056 aca13712 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].payload
4057 53c776b5 Iustin Pop
4058 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
4059 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
4060 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
4061 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
4062 53c776b5 Iustin Pop
                               " and restart this operation.")
4063 53c776b5 Iustin Pop
4064 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
4065 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
4066 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
4067 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
4068 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
4069 53c776b5 Iustin Pop
4070 53c776b5 Iustin Pop
    if runningon_target:
4071 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
4072 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
4073 53c776b5 Iustin Pop
                       " updating config" % target_node)
4074 53c776b5 Iustin Pop
      instance.primary_node = target_node
4075 53c776b5 Iustin Pop
      self.cfg.Update(instance)
4076 53c776b5 Iustin Pop
      demoted_node = source_node
4077 53c776b5 Iustin Pop
    else:
4078 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
4079 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
4080 53c776b5 Iustin Pop
      demoted_node = target_node
4081 53c776b5 Iustin Pop
4082 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
4083 53c776b5 Iustin Pop
    try:
4084 53c776b5 Iustin Pop
      self._WaitUntilSync()
4085 53c776b5 Iustin Pop
    except errors.OpExecError:
4086 53c776b5 Iustin Pop
      # we ignore errors here, since if the device is standalone, it
4087 53c776b5 Iustin Pop
      # won't be able to sync
4088 53c776b5 Iustin Pop
      pass
4089 53c776b5 Iustin Pop
    self._GoStandalone()
4090 53c776b5 Iustin Pop
    self._GoReconnect(False)
4091 53c776b5 Iustin Pop
    self._WaitUntilSync()
4092 53c776b5 Iustin Pop
4093 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4094 53c776b5 Iustin Pop
4095 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
4096 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
4097 6906a9d8 Guido Trotter

4098 6906a9d8 Guido Trotter
    """
4099 6906a9d8 Guido Trotter
    target_node = self.target_node
4100 6906a9d8 Guido Trotter
    try:
4101 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
4102 6906a9d8 Guido Trotter
      self._GoStandalone()
4103 6906a9d8 Guido Trotter
      self._GoReconnect(False)
4104 6906a9d8 Guido Trotter
      self._WaitUntilSync()
4105 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
4106 6906a9d8 Guido Trotter
      self.LogWarning("Migration failed and I can't reconnect the"
4107 6906a9d8 Guido Trotter
                      " drives: error '%s'\n"
4108 6906a9d8 Guido Trotter
                      "Please look and recover the instance status" %
4109 6906a9d8 Guido Trotter
                      str(err))
4110 6906a9d8 Guido Trotter
4111 6906a9d8 Guido Trotter
  def _AbortMigration(self):
4112 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
4113 6906a9d8 Guido Trotter

4114 6906a9d8 Guido Trotter
    """
4115 6906a9d8 Guido Trotter
    instance = self.instance
4116 6906a9d8 Guido Trotter
    target_node = self.target_node
4117 6906a9d8 Guido Trotter
    migration_info = self.migration_info
4118 6906a9d8 Guido Trotter
4119 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
4120 6906a9d8 Guido Trotter
                                                    instance,
4121 6906a9d8 Guido Trotter
                                                    migration_info,
4122 6906a9d8 Guido Trotter
                                                    False)
4123 4c4e4e1e Iustin Pop
    abort_msg = abort_result.fail_msg
4124 6906a9d8 Guido Trotter
    if abort_msg:
4125 6906a9d8 Guido Trotter
      logging.error("Aborting migration failed on target node %s: %s" %
4126 6906a9d8 Guido Trotter
                    (target_node, abort_msg))
4127 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we still have to try to revert the
4128 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
4129 6906a9d8 Guido Trotter
4130 53c776b5 Iustin Pop
  def _ExecMigration(self):
4131 53c776b5 Iustin Pop
    """Migrate an instance.
4132 53c776b5 Iustin Pop

4133 53c776b5 Iustin Pop
    The migrate is done by:
4134 53c776b5 Iustin Pop
      - change the disks into dual-master mode
4135 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
4136 53c776b5 Iustin Pop
      - migrate the instance
4137 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
4138 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
4139 53c776b5 Iustin Pop
      - change disks into single-master mode
4140 53c776b5 Iustin Pop

4141 53c776b5 Iustin Pop
    """
4142 53c776b5 Iustin Pop
    instance = self.instance
4143 53c776b5 Iustin Pop
    target_node = self.target_node
4144 53c776b5 Iustin Pop
    source_node = self.source_node
4145 53c776b5 Iustin Pop
4146 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
4147 53c776b5 Iustin Pop
    for dev in instance.disks:
4148 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
4149 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
4150 53c776b5 Iustin Pop
                                 " synchronized on target node,"
4151 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
4152 53c776b5 Iustin Pop
4153 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
4154 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
4155 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4156 6906a9d8 Guido Trotter
    if msg:
4157 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
4158 0959c824 Iustin Pop
                 (source_node, msg))
4159 6906a9d8 Guido Trotter
      logging.error(log_err)
4160 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
4161 6906a9d8 Guido Trotter
4162 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
4163 6906a9d8 Guido Trotter
4164 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
4165 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
4166 53c776b5 Iustin Pop
    self._GoStandalone()
4167 53c776b5 Iustin Pop
    self._GoReconnect(True)
4168 53c776b5 Iustin Pop
    self._WaitUntilSync()
4169 53c776b5 Iustin Pop
4170 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
4171 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
4172 6906a9d8 Guido Trotter
                                           instance,
4173 6906a9d8 Guido Trotter
                                           migration_info,
4174 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
4175 6906a9d8 Guido Trotter
4176 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4177 6906a9d8 Guido Trotter
    if msg:
4178 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
4179 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
4180 6906a9d8 Guido Trotter
      self._AbortMigration()
4181 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
4182 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
4183 6906a9d8 Guido Trotter
                               (instance.name, msg))
4184 6906a9d8 Guido Trotter
4185 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
4186 53c776b5 Iustin Pop
    time.sleep(10)
4187 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
4188 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
4189 53c776b5 Iustin Pop
                                            self.op.live)
4190 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4191 53c776b5 Iustin Pop
    if msg:
4192 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
4193 53c776b5 Iustin Pop
                    " disk status: %s", msg)
4194 6906a9d8 Guido Trotter
      self._AbortMigration()
4195 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
4196 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
4197 53c776b5 Iustin Pop
                               (instance.name, msg))
4198 53c776b5 Iustin Pop
    time.sleep(10)
4199 53c776b5 Iustin Pop
4200 53c776b5 Iustin Pop
    instance.primary_node = target_node
4201 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
4202 53c776b5 Iustin Pop
    self.cfg.Update(instance)
4203 53c776b5 Iustin Pop
4204 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
4205 6906a9d8 Guido Trotter
                                              instance,
4206 6906a9d8 Guido Trotter
                                              migration_info,
4207 6906a9d8 Guido Trotter
                                              True)
4208 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4209 6906a9d8 Guido Trotter
    if msg:
4210 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
4211 6906a9d8 Guido Trotter
                    " %s" % msg)
4212 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
4213 6906a9d8 Guido Trotter
                               msg)
4214 6906a9d8 Guido Trotter
4215 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
4216 53c776b5 Iustin Pop
    self._WaitUntilSync()
4217 53c776b5 Iustin Pop
    self._GoStandalone()
4218 53c776b5 Iustin Pop
    self._GoReconnect(False)
4219 53c776b5 Iustin Pop
    self._WaitUntilSync()
4220 53c776b5 Iustin Pop
4221 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4222 53c776b5 Iustin Pop
4223 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
4224 53c776b5 Iustin Pop
    """Perform the migration.
4225 53c776b5 Iustin Pop

4226 53c776b5 Iustin Pop
    """
4227 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
4228 53c776b5 Iustin Pop
4229 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
4230 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
4231 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
4232 53c776b5 Iustin Pop
    self.nodes_ip = {
4233 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
4234 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
4235 53c776b5 Iustin Pop
      }
4236 53c776b5 Iustin Pop
    if self.op.cleanup:
4237 53c776b5 Iustin Pop
      return self._ExecCleanup()
4238 53c776b5 Iustin Pop
    else:
4239 53c776b5 Iustin Pop
      return self._ExecMigration()
4240 53c776b5 Iustin Pop
4241 53c776b5 Iustin Pop
4242 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
4243 428958aa Iustin Pop
                    info, force_open):
4244 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
4245 a8083063 Iustin Pop

4246 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
4247 a8083063 Iustin Pop
  all its children.
4248 a8083063 Iustin Pop

4249 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
4250 a8083063 Iustin Pop

4251 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
4252 428958aa Iustin Pop
  @param node: the node on which to create the device
4253 428958aa Iustin Pop
  @type instance: L{objects.Instance}
4254 428958aa Iustin Pop
  @param instance: the instance which owns the device
4255 428958aa Iustin Pop
  @type device: L{objects.Disk}
4256 428958aa Iustin Pop
  @param device: the device to create
4257 428958aa Iustin Pop
  @type force_create: boolean
4258 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
4259 428958aa Iustin Pop
      will be changed to True whenever we find a device which has
4260 428958aa Iustin Pop
      CreateOnSecondary() attribute
4261 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4262 428958aa Iustin Pop
      (this will be represented as a LVM tag)
4263 428958aa Iustin Pop
  @type force_open: boolean
4264 428958aa Iustin Pop
  @param force_open: this parameter will be passed to the
4265 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4266 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
4267 428958aa Iustin Pop
      the child assembly and the device's own Open() execution
4268 428958aa Iustin Pop

4269 a8083063 Iustin Pop
  """
4270 a8083063 Iustin Pop
  if device.CreateOnSecondary():
4271 428958aa Iustin Pop
    force_create = True
4272 796cab27 Iustin Pop
4273 a8083063 Iustin Pop
  if device.children:
4274 a8083063 Iustin Pop
    for child in device.children:
4275 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
4276 428958aa Iustin Pop
                      info, force_open)
4277 a8083063 Iustin Pop
4278 428958aa Iustin Pop
  if not force_create:
4279 796cab27 Iustin Pop
    return
4280 796cab27 Iustin Pop
4281 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
4282 de12473a Iustin Pop
4283 de12473a Iustin Pop
4284 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
4285 de12473a Iustin Pop
  """Create a single block device on a given node.
4286 de12473a Iustin Pop

4287 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
4288 de12473a Iustin Pop
  created in advance.
4289 de12473a Iustin Pop

4290 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
4291 de12473a Iustin Pop
  @param node: the node on which to create the device
4292 de12473a Iustin Pop
  @type instance: L{objects.Instance}
4293 de12473a Iustin Pop
  @param instance: the instance which owns the device
4294 de12473a Iustin Pop
  @type device: L{objects.Disk}
4295 de12473a Iustin Pop
  @param device: the device to create
4296 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4297 de12473a Iustin Pop
      (this will be represented as a LVM tag)
4298 de12473a Iustin Pop
  @type force_open: boolean
4299 de12473a Iustin Pop
  @param force_open: this parameter will be passed to the
4300 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4301 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
4302 de12473a Iustin Pop
      the child assembly and the device's own Open() execution
4303 de12473a Iustin Pop

4304 de12473a Iustin Pop
  """
4305 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
4306 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
4307 428958aa Iustin Pop
                                       instance.name, force_open, info)
4308 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
4309 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
4310 a8083063 Iustin Pop
  if device.physical_id is None:
4311 0959c824 Iustin Pop
    device.physical_id = result.payload
4312 a8083063 Iustin Pop
4313 a8083063 Iustin Pop
4314 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
4315 923b1523 Iustin Pop
  """Generate a suitable LV name.
4316 923b1523 Iustin Pop

4317 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
4318 923b1523 Iustin Pop

4319 923b1523 Iustin Pop
  """
4320 923b1523 Iustin Pop
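  # Illustrative note (added): for exts such as [".disk0", ".disk1"] this
  # returns names of the form "<unique-id>.disk0"; the DRBD8 code path later
  # suffixes such a prefix with "_data" and "_meta" for its two LVs.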
  results = []
4321 923b1523 Iustin Pop
  for val in exts:
4322 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
4323 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
4324 923b1523 Iustin Pop
  return results
4325 923b1523 Iustin Pop
4326 923b1523 Iustin Pop
4327 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
4328 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
4329 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
4330 a1f445d3 Iustin Pop

4331 a1f445d3 Iustin Pop
  """
4332 b9bddb6b Iustin Pop
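  # Sketch of the result (illustrative comment): an LD_DRBD8 disk of the
  # requested size with two LV children, the data volume (names[0], full
  # size) and a fixed 128 MB metadata volume (names[1]), wired to
  # (primary, secondary, port, p_minor, s_minor, shared_secret).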
  port = lu.cfg.AllocatePort()
4333 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
4334 b9bddb6b Iustin Pop
  shared_secret = lu.cfg.GenerateDRBDSecret()
4335 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
4336 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
4337 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
4338 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
4339 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
4340 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
4341 f9518d38 Iustin Pop
                                      p_minor, s_minor,
4342 f9518d38 Iustin Pop
                                      shared_secret),
4343 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
4344 a1f445d3 Iustin Pop
                          iv_name=iv_name)
4345 a1f445d3 Iustin Pop
  return drbd_dev
4346 a1f445d3 Iustin Pop
4347 7c0d6283 Michael Hanselmann
4348 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
4349 a8083063 Iustin Pop
                          instance_name, primary_node,
4350 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
4351 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
4352 e2a65344 Iustin Pop
                          base_index):
4353 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
4354 a8083063 Iustin Pop

4355 a8083063 Iustin Pop
  """
4356 a8083063 Iustin Pop
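  # Naming overview (illustrative comment): plain LVs are named
  # "<unique-id>.disk<N>", DRBD8 disks get a "<unique-id>.disk<N>_data" and
  # "_meta" LV pair, and file-based disks live under
  # "<file_storage_dir>/disk<N>"; all are exposed as iv_name "disk/<N>".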
  #TODO: compute space requirements
4357 a8083063 Iustin Pop
4358 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
4359 08db7c5c Iustin Pop
  disk_count = len(disk_info)
4360 08db7c5c Iustin Pop
  disks = []
4361 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
4362 08db7c5c Iustin Pop
    pass
4363 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
4364 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
4365 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
4366 923b1523 Iustin Pop
4367 fb4b324b Guido Trotter
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
4368 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
4369 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
4370 e2a65344 Iustin Pop
      disk_index = idx + base_index
4371 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
4372 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
4373 6ec66eae Iustin Pop
                              iv_name="disk/%d" % disk_index,
4374 6ec66eae Iustin Pop
                              mode=disk["mode"])
4375 08db7c5c Iustin Pop
      disks.append(disk_dev)
4376 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
4377 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
4378 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
4379 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
4380 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
4381 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
4382 08db7c5c Iustin Pop
4383 e6c1ff2f Iustin Pop
    names = []
4384 fb4b324b Guido Trotter
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
4385 e6c1ff2f Iustin Pop
                                               for i in range(disk_count)]):
4386 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_data")
4387 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_meta")
4388 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
4389 112050d9 Iustin Pop
      disk_index = idx + base_index
4390 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
4391 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
4392 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
4393 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
4394 6ec66eae Iustin Pop
      disk_dev.mode = disk["mode"]
4395 08db7c5c Iustin Pop
      disks.append(disk_dev)
4396 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
4397 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
4398 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
4399 0f1a06e3 Manuel Franceschini
4400 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
4401 112050d9 Iustin Pop
      disk_index = idx + base_index
4402 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
4403 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
4404 08db7c5c Iustin Pop
                              logical_id=(file_driver,
4405 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
4406 43e99cff Guido Trotter
                                                         disk_index)),
4407 6ec66eae Iustin Pop
                              mode=disk["mode"])
4408 08db7c5c Iustin Pop
      disks.append(disk_dev)
4409 a8083063 Iustin Pop
  else:
4410 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
4411 a8083063 Iustin Pop
  return disks
4412 a8083063 Iustin Pop
4413 a8083063 Iustin Pop
4414 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
4415 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
4416 3ecf6786 Iustin Pop

4417 3ecf6786 Iustin Pop
  """
4418 a0c3fea1 Michael Hanselmann
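  # E.g. (illustrative): an instance named "inst1.example.com" yields the
  # tag "originstname+inst1.example.com".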
  return "originstname+%s" % instance.name
4419 a0c3fea1 Michael Hanselmann
4420 a0c3fea1 Michael Hanselmann
4421 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
4422 a8083063 Iustin Pop
  """Create all disks for an instance.
4423 a8083063 Iustin Pop

4424 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
4425 a8083063 Iustin Pop

4426 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
4427 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
4428 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
4429 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
4430 e4376078 Iustin Pop
  @rtype: None
4431 e4376078 Iustin Pop
  @return: None; failures are signalled by raising exceptions
4432 a8083063 Iustin Pop

4433 a8083063 Iustin Pop
  """
4434 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
4435 428958aa Iustin Pop
  pnode = instance.primary_node
4436 a0c3fea1 Michael Hanselmann
4437 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
4438 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4439 428958aa Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
4440 0f1a06e3 Manuel Franceschini
4441 4c4e4e1e Iustin Pop
    result.Raise("Failed to create directory '%s' on"
4442 4c4e4e1e Iustin Pop
                 " node %s: %s" % (file_storage_dir, pnode))
4443 0f1a06e3 Manuel Franceschini
4444 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
4445 24991749 Iustin Pop
  # LUSetInstanceParams
4446 a8083063 Iustin Pop
  for device in instance.disks:
4447 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
4448 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
4449 a8083063 Iustin Pop
    #HARDCODE
4450 428958aa Iustin Pop
    for node in instance.all_nodes:
4451 428958aa Iustin Pop
      f_create = node == pnode
4452 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
4453 a8083063 Iustin Pop
4454 a8083063 Iustin Pop
4455 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
4456 a8083063 Iustin Pop
  """Remove all disks for an instance.
4457 a8083063 Iustin Pop

4458 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
4459 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
4460 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
4461 a8083063 Iustin Pop
  with `_CreateDisks()`).
4462 a8083063 Iustin Pop

4463 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
4464 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
4465 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
4466 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
4467 e4376078 Iustin Pop
  @rtype: boolean
4468 e4376078 Iustin Pop
  @return: the success of the removal
4469 a8083063 Iustin Pop

4470 a8083063 Iustin Pop
  """
4471 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
4472 a8083063 Iustin Pop
4473 e1bc0878 Iustin Pop
  all_result = True
4474 a8083063 Iustin Pop
  for device in instance.disks:
4475 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
4476 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
4477 4c4e4e1e Iustin Pop
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
4478 e1bc0878 Iustin Pop
      if msg:
4479 e1bc0878 Iustin Pop
        lu.LogWarning("Could not remove block device %s on node %s,"
4480 e1bc0878 Iustin Pop
                      " continuing anyway: %s", device.iv_name, node, msg)
4481 e1bc0878 Iustin Pop
        all_result = False
4482 0f1a06e3 Manuel Franceschini
4483 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
4484 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4485 781de953 Iustin Pop
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
4486 781de953 Iustin Pop
                                                 file_storage_dir)
4487 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4488 b2b8bcce Iustin Pop
    if msg:
4489 b2b8bcce Iustin Pop
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
4490 b2b8bcce Iustin Pop
                    file_storage_dir, instance.primary_node, msg)
4491 e1bc0878 Iustin Pop
      all_result = False
4492 0f1a06e3 Manuel Franceschini
4493 e1bc0878 Iustin Pop
  return all_result
4494 a8083063 Iustin Pop
4495 a8083063 Iustin Pop
4496 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
4497 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
4498 e2fe6369 Iustin Pop

4499 e2fe6369 Iustin Pop
  """
4500 e2fe6369 Iustin Pop
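  # Worked example (illustrative comment): a DT_DRBD8 request with two disks
  # of 1024 MB and 2048 MB needs (1024 + 128) + (2048 + 128) = 3328 MB in
  # the volume group, since each disk carries 128 MB of DRBD metadata.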
  # Required free disk space as a function of the disk template and disk sizes
4501 e2fe6369 Iustin Pop
  req_size_dict = {
4502 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
4503 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
4504 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
4505 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
4506 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
4507 e2fe6369 Iustin Pop
  }
4508 e2fe6369 Iustin Pop
4509 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
4510 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
4511 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
4512 e2fe6369 Iustin Pop
4513 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
4514 e2fe6369 Iustin Pop
4515 e2fe6369 Iustin Pop
4516 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
4517 74409b12 Iustin Pop
  """Hypervisor parameter validation.
4518 74409b12 Iustin Pop

4519 74409b12 Iustin Pop
  This function abstracts the hypervisor parameter validation to be
4520 74409b12 Iustin Pop
  used in both instance create and instance modify.
4521 74409b12 Iustin Pop

4522 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
4523 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
4524 74409b12 Iustin Pop
  @type nodenames: list
4525 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
4526 74409b12 Iustin Pop
  @type hvname: string
4527 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
4528 74409b12 Iustin Pop
  @type hvparams: dict
4529 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
4530 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
4531 74409b12 Iustin Pop

4532 74409b12 Iustin Pop
  """
4533 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
4534 74409b12 Iustin Pop
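  # Typical call (illustrative, inferred from the docstring above):
  #   _CheckHVParams(self, nodenames, self.op.hypervisor, filled_hvp)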
                                                  hvname,
4535 74409b12 Iustin Pop
                                                  hvparams)
4536 74409b12 Iustin Pop
  for node in nodenames:
4537 781de953 Iustin Pop
    info = hvinfo[node]
4538 68c6f21c Iustin Pop
    if info.offline:
4539 68c6f21c Iustin Pop
      continue
4540 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
4541 74409b12 Iustin Pop
4542 74409b12 Iustin Pop
4543 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
4544 a8083063 Iustin Pop
  """Create an instance.
4545 a8083063 Iustin Pop

4546 a8083063 Iustin Pop
  """
4547 a8083063 Iustin Pop
  HPATH = "instance-add"
4548 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4549 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
4550 08db7c5c Iustin Pop
              "mode", "start",
4551 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
4552 338e51e8 Iustin Pop
              "hvparams", "beparams"]
4553 7baf741d Guido Trotter
  REQ_BGL = False
4554 7baf741d Guido Trotter
4555 7baf741d Guido Trotter
  def _ExpandNode(self, node):
4556 7baf741d Guido Trotter
    """Expands and checks one node name.
4557 7baf741d Guido Trotter

4558 7baf741d Guido Trotter
    """
4559 7baf741d Guido Trotter
    node_full = self.cfg.ExpandNodeName(node)
4560 7baf741d Guido Trotter
    if node_full is None:
4561 7baf741d Guido Trotter
      raise errors.OpPrereqError("Unknown node %s" % node)
4562 7baf741d Guido Trotter
    return node_full
4563 7baf741d Guido Trotter
4564 7baf741d Guido Trotter
  def ExpandNames(self):
4565 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
4566 7baf741d Guido Trotter

4567 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
4568 7baf741d Guido Trotter

4569 7baf741d Guido Trotter
    """
4570 7baf741d Guido Trotter
    self.needed_locks = {}
4571 7baf741d Guido Trotter
4572 7baf741d Guido Trotter
    # set optional parameters to none if they don't exist
4573 6785674e Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
4574 7baf741d Guido Trotter
      if not hasattr(self.op, attr):
4575 7baf741d Guido Trotter
        setattr(self.op, attr, None)
4576 7baf741d Guido Trotter
4577 4b2f38dd Iustin Pop
    # cheap checks, mostly valid constants given
4578 4b2f38dd Iustin Pop
4579 7baf741d Guido Trotter
    # verify creation mode
4580 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
4581 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
4582 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
4583 7baf741d Guido Trotter
                                 self.op.mode)
4584 4b2f38dd Iustin Pop
4585 7baf741d Guido Trotter
    # disk template and mirror node verification
4586 7baf741d Guido Trotter
    if self.op.disk_template not in constants.DISK_TEMPLATES:
4587 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid disk template name")
4588 7baf741d Guido Trotter
4589 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
4590 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
4591 4b2f38dd Iustin Pop
4592 8705eb96 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
4593 8705eb96 Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
4594 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
4595 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
4596 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
4597 4b2f38dd Iustin Pop
                                  ",".join(enabled_hvs)))
4598 4b2f38dd Iustin Pop
4599 6785674e Iustin Pop
    # check hypervisor parameter syntax (locally)
4600 a5728081 Guido Trotter
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4601 abe609b2 Guido Trotter
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
4602 8705eb96 Iustin Pop
                                  self.op.hvparams)
4603 6785674e Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
4604 8705eb96 Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
4605 67fc3042 Iustin Pop
    self.hv_full = filled_hvp
4606 6785674e Iustin Pop
4607 338e51e8 Iustin Pop
    # fill and remember the beparams dict
4608 a5728081 Guido Trotter
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4609 4ef7f423 Guido Trotter
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
4610 338e51e8 Iustin Pop
                                    self.op.beparams)
4611 338e51e8 Iustin Pop
4612 7baf741d Guido Trotter
    #### instance parameters check
4613 7baf741d Guido Trotter
4614 7baf741d Guido Trotter
    # instance name verification
4615 7baf741d Guido Trotter
    hostname1 = utils.HostInfo(self.op.instance_name)
4616 7baf741d Guido Trotter
    self.op.instance_name = instance_name = hostname1.name
4617 7baf741d Guido Trotter
4618 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
4619 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
4620 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
4621 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4622 7baf741d Guido Trotter
                                 instance_name)
4623 7baf741d Guido Trotter
4624 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
4625 7baf741d Guido Trotter
4626 08db7c5c Iustin Pop
    # NIC buildup
4627 08db7c5c Iustin Pop
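    # Illustrative example (added): self.op.nics is a list of dicts, e.g.
    # [{"mode": constants.NIC_MODE_BRIDGED, "link": "xen-br0"},
    #  {"ip": constants.VALUE_AUTO}]; missing keys fall back to the
    # cluster-level nicparams defaults handled below.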
    self.nics = []
4628 9dce4771 Guido Trotter
    for idx, nic in enumerate(self.op.nics):
4629 9dce4771 Guido Trotter
      nic_mode_req = nic.get("mode", None)
4630 9dce4771 Guido Trotter
      nic_mode = nic_mode_req
4631 9dce4771 Guido Trotter
      if nic_mode is None:
4632 9dce4771 Guido Trotter
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
4633 9dce4771 Guido Trotter
4634 9dce4771 Guido Trotter
      # in routed mode, for the first nic, the default ip is 'auto'
4635 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
4636 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_AUTO
4637 9dce4771 Guido Trotter
      else:
4638 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_NONE
4639 9dce4771 Guido Trotter
4640 08db7c5c Iustin Pop
      # ip validity checks
4641 9dce4771 Guido Trotter
      ip = nic.get("ip", default_ip_mode)
4642 9dce4771 Guido Trotter
      if ip is None or ip.lower() == constants.VALUE_NONE:
4643 08db7c5c Iustin Pop
        nic_ip = None
4644 08db7c5c Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
4645 08db7c5c Iustin Pop
        nic_ip = hostname1.ip
4646 08db7c5c Iustin Pop
      else:
4647 08db7c5c Iustin Pop
        if not utils.IsValidIP(ip):
4648 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
4649 08db7c5c Iustin Pop
                                     " like a valid IP" % ip)
4650 08db7c5c Iustin Pop
        nic_ip = ip
4651 08db7c5c Iustin Pop
4652 9dce4771 Guido Trotter
      # TODO: check the ip for uniqueness !!
4653 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
4654 9dce4771 Guido Trotter
        raise errors.OpPrereqError("Routed nic mode requires an ip address")
4655 9dce4771 Guido Trotter
4656 08db7c5c Iustin Pop
      # MAC address verification
4657 08db7c5c Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
4658 08db7c5c Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4659 08db7c5c Iustin Pop
        if not utils.IsValidMac(mac.lower()):
4660 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
4661 08db7c5c Iustin Pop
                                     mac)
4662 08db7c5c Iustin Pop
      # bridge verification
4663 9939547b Iustin Pop
      bridge = nic.get("bridge", None)
4664 9dce4771 Guido Trotter
      link = nic.get("link", None)
4665 9dce4771 Guido Trotter
      if bridge and link:
4666 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
4667 29921401 Iustin Pop
                                   " at the same time")
4668 9dce4771 Guido Trotter
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
4669 9dce4771 Guido Trotter
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic")
4670 9dce4771 Guido Trotter
      elif bridge:
4671 9dce4771 Guido Trotter
        link = bridge
4672 9dce4771 Guido Trotter
4673 9dce4771 Guido Trotter
      nicparams = {}
4674 9dce4771 Guido Trotter
      if nic_mode_req:
4675 9dce4771 Guido Trotter
        nicparams[constants.NIC_MODE] = nic_mode_req
4676 9dce4771 Guido Trotter
      if link:
4677 9dce4771 Guido Trotter
        nicparams[constants.NIC_LINK] = link
4678 9dce4771 Guido Trotter
4679 9dce4771 Guido Trotter
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4680 9dce4771 Guido Trotter
                                      nicparams)
4681 9dce4771 Guido Trotter
      objects.NIC.CheckParameterSyntax(check_params)
4682 9dce4771 Guido Trotter
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
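      # Illustrative note (editor's addition, not original code): 'nicparams'
      # holds only the per-NIC overrides actually given, e.g. something like
      #   {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
      #    constants.NIC_LINK: "xen-br0"}   # "xen-br0" is an assumed value
      # FillDict above merges these with the cluster defaults only for the
      # syntax check; the override dict itself is what is stored on the NIC.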
4683 08db7c5c Iustin Pop
4684 08db7c5c Iustin Pop
    # disk checks/pre-build
4685 08db7c5c Iustin Pop
    self.disks = []
4686 08db7c5c Iustin Pop
    for disk in self.op.disks:
4687 08db7c5c Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
4688 08db7c5c Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
4689 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
4690 08db7c5c Iustin Pop
                                   mode)
4691 08db7c5c Iustin Pop
      size = disk.get("size", None)
4692 08db7c5c Iustin Pop
      if size is None:
4693 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Missing disk size")
4694 08db7c5c Iustin Pop
      try:
4695 08db7c5c Iustin Pop
        size = int(size)
4696 08db7c5c Iustin Pop
      except ValueError:
4697 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
4698 08db7c5c Iustin Pop
      self.disks.append({"size": size, "mode": mode})
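      # Illustrative note (editor's addition): self.disks ends up as a list of
      # plain dicts, e.g. [{"size": 10240, "mode": constants.DISK_RDWR}] for a
      # single 10 GiB read-write disk (sizes are in mebibytes; values made up).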
4699 08db7c5c Iustin Pop
4700 7baf741d Guido Trotter
    # used in CheckPrereq for ip ping check
4701 7baf741d Guido Trotter
    self.check_ip = hostname1.ip
4702 7baf741d Guido Trotter
4703 7baf741d Guido Trotter
    # file storage checks
4704 7baf741d Guido Trotter
    if (self.op.file_driver and
4705 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
4706 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
4707 7baf741d Guido Trotter
                                 self.op.file_driver)
4708 7baf741d Guido Trotter
4709 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
4710 7baf741d Guido Trotter
      raise errors.OpPrereqError("File storage directory path not absolute")
4711 7baf741d Guido Trotter
4712 7baf741d Guido Trotter
    ### Node/iallocator related checks
4713 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
4714 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
4715 7baf741d Guido Trotter
                                 " node must be given")
4716 7baf741d Guido Trotter
4717 7baf741d Guido Trotter
    if self.op.iallocator:
4718 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4719 7baf741d Guido Trotter
    else:
4720 7baf741d Guido Trotter
      self.op.pnode = self._ExpandNode(self.op.pnode)
4721 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
4722 7baf741d Guido Trotter
      if self.op.snode is not None:
4723 7baf741d Guido Trotter
        self.op.snode = self._ExpandNode(self.op.snode)
4724 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
4725 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
4726 7baf741d Guido Trotter
4727 7baf741d Guido Trotter
    # in case of import lock the source node too
4728 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
4729 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
4730 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
4731 7baf741d Guido Trotter
4732 b9322a9f Guido Trotter
      if src_path is None:
4733 b9322a9f Guido Trotter
        self.op.src_path = src_path = self.op.instance_name
4734 b9322a9f Guido Trotter
4735 b9322a9f Guido Trotter
      if src_node is None:
4736 b9322a9f Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4737 b9322a9f Guido Trotter
        self.op.src_node = None
4738 b9322a9f Guido Trotter
        if os.path.isabs(src_path):
4739 b9322a9f Guido Trotter
          raise errors.OpPrereqError("Importing an instance from an absolute"
4740 b9322a9f Guido Trotter
                                     " path requires a source node option.")
4741 b9322a9f Guido Trotter
      else:
4742 b9322a9f Guido Trotter
        self.op.src_node = src_node = self._ExpandNode(src_node)
4743 b9322a9f Guido Trotter
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4744 b9322a9f Guido Trotter
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
4745 b9322a9f Guido Trotter
        if not os.path.isabs(src_path):
4746 b9322a9f Guido Trotter
          self.op.src_path = src_path = \
4747 b9322a9f Guido Trotter
            os.path.join(constants.EXPORT_DIR, src_path)
4748 7baf741d Guido Trotter
4749 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
4750 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
4751 7baf741d Guido Trotter
        raise errors.OpPrereqError("No guest OS specified")
4752 a8083063 Iustin Pop
4753 538475ca Iustin Pop
  def _RunAllocator(self):
4754 538475ca Iustin Pop
    """Run the allocator based on input opcode.
4755 538475ca Iustin Pop

4756 538475ca Iustin Pop
    """
4757 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
4758 923ddac0 Michael Hanselmann
    ial = IAllocator(self.cfg, self.rpc,
4759 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
4760 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
4761 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
4762 d1c2dd75 Iustin Pop
                     tags=[],
4763 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
4764 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
4765 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
4766 08db7c5c Iustin Pop
                     disks=self.disks,
4767 d1c2dd75 Iustin Pop
                     nics=nics,
4768 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
4769 29859cb7 Iustin Pop
                     )
4770 d1c2dd75 Iustin Pop
4771 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
4772 d1c2dd75 Iustin Pop
4773 d1c2dd75 Iustin Pop
    if not ial.success:
4774 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
4775 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
4776 d1c2dd75 Iustin Pop
                                                           ial.info))
4777 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
4778 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4779 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
4780 97abc79f Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
4781 1ce4bbe3 René Nussbaumer
                                  ial.required_nodes))
4782 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
4783 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
4784 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
4785 86d9d3bb Iustin Pop
                 ", ".join(ial.nodes))
4786 27579978 Iustin Pop
    if ial.required_nodes == 2:
4787 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
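    # Editor's note (not original code): the allocator returns an ordered list
    # of node names; required_nodes is expected to be 1 for single-node disk
    # templates and 2 for network-mirrored (DRBD) ones, hence the optional
    # secondary above.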
4788 538475ca Iustin Pop
4789 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4790 a8083063 Iustin Pop
    """Build hooks env.
4791 a8083063 Iustin Pop

4792 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4793 a8083063 Iustin Pop

4794 a8083063 Iustin Pop
    """
4795 a8083063 Iustin Pop
    env = {
4796 2c2690c9 Iustin Pop
      "ADD_MODE": self.op.mode,
4797 a8083063 Iustin Pop
      }
4798 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
4799 2c2690c9 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
4800 2c2690c9 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
4801 2c2690c9 Iustin Pop
      env["SRC_IMAGES"] = self.src_images
4802 396e1b78 Michael Hanselmann
4803 2c2690c9 Iustin Pop
    env.update(_BuildInstanceHookEnv(
4804 2c2690c9 Iustin Pop
      name=self.op.instance_name,
4805 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
4806 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
4807 4978db17 Iustin Pop
      status=self.op.start,
4808 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
4809 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
4810 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
4811 f9b10246 Guido Trotter
      nics=_NICListToTuple(self, self.nics),
4812 2c2690c9 Iustin Pop
      disk_template=self.op.disk_template,
4813 2c2690c9 Iustin Pop
      disks=[(d["size"], d["mode"]) for d in self.disks],
4814 67fc3042 Iustin Pop
      bep=self.be_full,
4815 67fc3042 Iustin Pop
      hvp=self.hv_full,
4816 3df6e710 Iustin Pop
      hypervisor_name=self.op.hypervisor,
4817 396e1b78 Michael Hanselmann
    ))
4818 a8083063 Iustin Pop
4819 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
4820 a8083063 Iustin Pop
          self.secondaries)
4821 a8083063 Iustin Pop
    return env, nl, nl
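    # Editor's note (not original code): these keys are consumed by the hooks
    # runner, which is expected to export them to hook scripts with a GANETI_
    # prefix (e.g. ADD_MODE becoming GANETI_ADD_MODE); nl doubles as both the
    # pre- and post-hook node list.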
4822 a8083063 Iustin Pop
4823 a8083063 Iustin Pop
4824 a8083063 Iustin Pop
  def CheckPrereq(self):
4825 a8083063 Iustin Pop
    """Check prerequisites.
4826 a8083063 Iustin Pop

4827 a8083063 Iustin Pop
    """
4828 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
4829 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
4830 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
4831 eedc99de Manuel Franceschini
                                 " instances")
4832 eedc99de Manuel Franceschini
4833 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
4834 7baf741d Guido Trotter
      src_node = self.op.src_node
4835 7baf741d Guido Trotter
      src_path = self.op.src_path
4836 a8083063 Iustin Pop
4837 c0cbdc67 Guido Trotter
      if src_node is None:
4838 1b7bfbb7 Iustin Pop
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
4839 1b7bfbb7 Iustin Pop
        exp_list = self.rpc.call_export_list(locked_nodes)
4840 c0cbdc67 Guido Trotter
        found = False
4841 c0cbdc67 Guido Trotter
        for node in exp_list:
4842 4c4e4e1e Iustin Pop
          if exp_list[node].fail_msg:
4843 1b7bfbb7 Iustin Pop
            continue
4844 1b7bfbb7 Iustin Pop
          if src_path in exp_list[node].payload:
4845 c0cbdc67 Guido Trotter
            found = True
4846 c0cbdc67 Guido Trotter
            self.op.src_node = src_node = node
4847 c0cbdc67 Guido Trotter
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
4848 c0cbdc67 Guido Trotter
                                                       src_path)
4849 c0cbdc67 Guido Trotter
            break
4850 c0cbdc67 Guido Trotter
        if not found:
4851 c0cbdc67 Guido Trotter
          raise errors.OpPrereqError("No export found for relative path %s" %
4852 c0cbdc67 Guido Trotter
                                      src_path)
4853 c0cbdc67 Guido Trotter
4854 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, src_node)
4855 781de953 Iustin Pop
      result = self.rpc.call_export_info(src_node, src_path)
4856 4c4e4e1e Iustin Pop
      result.Raise("No export or invalid export found in dir %s" % src_path)
4857 a8083063 Iustin Pop
4858 3eccac06 Iustin Pop
      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
4859 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
4860 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
4861 a8083063 Iustin Pop
4862 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
4863 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
4864 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
4865 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
4866 a8083063 Iustin Pop
4867 09acf207 Guido Trotter
      # Check that the new instance doesn't have fewer disks than the export
4868 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
4869 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
4870 09acf207 Guido Trotter
      if instance_disks < export_disks:
4871 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
4872 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
4873 726d7d68 Iustin Pop
                                   (instance_disks, export_disks))
4874 a8083063 Iustin Pop
4875 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
4876 09acf207 Guido Trotter
      disk_images = []
4877 09acf207 Guido Trotter
      for idx in range(export_disks):
4878 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
4879 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
4880 09acf207 Guido Trotter
          # FIXME: are the old OSes, disk sizes, etc. useful?
4881 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
4882 09acf207 Guido Trotter
          image = os.path.join(src_path, export_name)
4883 09acf207 Guido Trotter
          disk_images.append(image)
4884 09acf207 Guido Trotter
        else:
4885 09acf207 Guido Trotter
          disk_images.append(False)
4886 09acf207 Guido Trotter
4887 09acf207 Guido Trotter
      self.src_images = disk_images
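      # Illustrative note (editor's addition): disk_images is positional, one
      # entry per export disk, e.g. ["<src_path>/disk0_dump", False] when only
      # disk 0 has a dump in the export (paths shown schematically).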
4888 901a65c1 Iustin Pop
4889 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
4890 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
4891 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
4892 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
4893 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
4894 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
4895 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
4896 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
4897 bc89efc3 Guido Trotter
4898 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
4899 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
4900 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
4901 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
4902 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
4903 901a65c1 Iustin Pop
4904 901a65c1 Iustin Pop
    if self.op.ip_check:
4905 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
4906 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4907 7b3a8fb5 Iustin Pop
                                   (self.check_ip, self.op.instance_name))
4908 901a65c1 Iustin Pop
4909 295728df Guido Trotter
    #### mac address generation
4910 295728df Guido Trotter
    # By generating the MAC address here, both the allocator and the hooks
4911 295728df Guido Trotter
    # get the real, final MAC address rather than 'auto' or 'generate'.
4912 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
4913 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
4914 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
4915 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
4916 295728df Guido Trotter
    # creation job will fail.
4917 295728df Guido Trotter
    for nic in self.nics:
4918 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4919 295728df Guido Trotter
        nic.mac = self.cfg.GenerateMAC()
4920 295728df Guido Trotter
4921 538475ca Iustin Pop
    #### allocator run
4922 538475ca Iustin Pop
4923 538475ca Iustin Pop
    if self.op.iallocator is not None:
4924 538475ca Iustin Pop
      self._RunAllocator()
4925 0f1a06e3 Manuel Franceschini
4926 901a65c1 Iustin Pop
    #### node related checks
4927 901a65c1 Iustin Pop
4928 901a65c1 Iustin Pop
    # check primary node
4929 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
4930 7baf741d Guido Trotter
    assert self.pnode is not None, \
4931 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
4932 7527a8a4 Iustin Pop
    if pnode.offline:
4933 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
4934 7527a8a4 Iustin Pop
                                 pnode.name)
4935 733a2b6a Iustin Pop
    if pnode.drained:
4936 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
4937 733a2b6a Iustin Pop
                                 pnode.name)
4938 7527a8a4 Iustin Pop
4939 901a65c1 Iustin Pop
    self.secondaries = []
4940 901a65c1 Iustin Pop
4941 901a65c1 Iustin Pop
    # mirror node verification
4942 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
4943 7baf741d Guido Trotter
      if self.op.snode is None:
4944 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
4945 3ecf6786 Iustin Pop
                                   " a mirror node")
4946 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
4947 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
4948 3ecf6786 Iustin Pop
                                   " the primary node.")
4949 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
4950 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
4951 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
4952 a8083063 Iustin Pop
4953 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
4954 6785674e Iustin Pop
4955 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
4956 08db7c5c Iustin Pop
                                self.disks)
4957 ed1ebc60 Guido Trotter
4958 8d75db10 Iustin Pop
    # Check lv size requirements
4959 8d75db10 Iustin Pop
    if req_size is not None:
4960 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4961 72737a7f Iustin Pop
                                         self.op.hypervisor)
4962 8d75db10 Iustin Pop
      for node in nodenames:
4963 781de953 Iustin Pop
        info = nodeinfo[node]
4964 4c4e4e1e Iustin Pop
        info.Raise("Cannot get current information from node %s" % node)
4965 070e998b Iustin Pop
        info = info.payload
4966 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
4967 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
4968 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
4969 8d75db10 Iustin Pop
                                     " node %s" % node)
4970 070e998b Iustin Pop
        if req_size > vg_free:
4971 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
4972 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
4973 070e998b Iustin Pop
                                     (node, vg_free, req_size))
4974 ed1ebc60 Guido Trotter
4975 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
4976 6785674e Iustin Pop
4977 a8083063 Iustin Pop
    # os verification
4978 781de953 Iustin Pop
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
4979 4c4e4e1e Iustin Pop
    result.Raise("OS '%s' not in supported os list for primary node %s" %
4980 4c4e4e1e Iustin Pop
                 (self.op.os_type, pnode.name), prereq=True)
4981 a8083063 Iustin Pop
4982 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
4983 a8083063 Iustin Pop
4984 49ce1563 Iustin Pop
    # memory check on primary node
4985 49ce1563 Iustin Pop
    if self.op.start:
4986 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
4987 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
4988 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
4989 338e51e8 Iustin Pop
                           self.op.hypervisor)
4990 49ce1563 Iustin Pop
4991 08896026 Iustin Pop
    self.dry_run_result = list(nodenames)
4992 08896026 Iustin Pop
4993 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4994 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
4995 a8083063 Iustin Pop

4996 a8083063 Iustin Pop
    """
4997 a8083063 Iustin Pop
    instance = self.op.instance_name
4998 a8083063 Iustin Pop
    pnode_name = self.pnode.name
4999 a8083063 Iustin Pop
5000 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
5001 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
5002 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
5003 2a6469d5 Alexander Schreiber
    else:
5004 2a6469d5 Alexander Schreiber
      network_port = None
5005 58acb49d Alexander Schreiber
5006 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
5007 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
5008 31a853d2 Iustin Pop
5009 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
5010 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
5011 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
5012 2c313123 Manuel Franceschini
    else:
5013 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
5014 2c313123 Manuel Franceschini
5015 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
5016 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
5017 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
5018 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
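    # Worked example (editor's addition, values assumed): with a cluster file
    # storage dir of "/srv/ganeti/file-storage", no file_storage_dir in the
    # opcode and instance "inst1.example.com", this normalizes to
    # "/srv/ganeti/file-storage/inst1.example.com".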
5019 0f1a06e3 Manuel Franceschini
5020 0f1a06e3 Manuel Franceschini
5021 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
5022 a8083063 Iustin Pop
                                  self.op.disk_template,
5023 a8083063 Iustin Pop
                                  instance, pnode_name,
5024 08db7c5c Iustin Pop
                                  self.secondaries,
5025 08db7c5c Iustin Pop
                                  self.disks,
5026 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
5027 e2a65344 Iustin Pop
                                  self.op.file_driver,
5028 e2a65344 Iustin Pop
                                  0)
5029 a8083063 Iustin Pop
5030 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
5031 a8083063 Iustin Pop
                            primary_node=pnode_name,
5032 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
5033 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
5034 4978db17 Iustin Pop
                            admin_up=False,
5035 58acb49d Alexander Schreiber
                            network_port=network_port,
5036 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
5037 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
5038 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
5039 a8083063 Iustin Pop
                            )
5040 a8083063 Iustin Pop
5041 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
5042 796cab27 Iustin Pop
    try:
5043 796cab27 Iustin Pop
      _CreateDisks(self, iobj)
5044 796cab27 Iustin Pop
    except errors.OpExecError:
5045 796cab27 Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
5046 796cab27 Iustin Pop
      try:
5047 796cab27 Iustin Pop
        _RemoveDisks(self, iobj)
5048 796cab27 Iustin Pop
      finally:
5049 796cab27 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance)
5050 796cab27 Iustin Pop
        raise
5051 a8083063 Iustin Pop
5052 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
5053 a8083063 Iustin Pop
5054 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
5055 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
5056 7baf741d Guido Trotter
    # added the instance to the config
5057 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
5058 e36e96b4 Guido Trotter
    # Unlock all the nodes
5059 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
5060 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
5061 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
5062 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
5063 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
5064 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
5065 9c8971d7 Guido Trotter
    else:
5066 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
5067 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
5068 a8083063 Iustin Pop
5069 a8083063 Iustin Pop
    if self.op.wait_for_sync:
5070 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
5071 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
5072 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
5073 a8083063 Iustin Pop
      time.sleep(15)
5074 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
5075 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
5076 a8083063 Iustin Pop
    else:
5077 a8083063 Iustin Pop
      disk_abort = False
5078 a8083063 Iustin Pop
5079 a8083063 Iustin Pop
    if disk_abort:
5080 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
5081 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
5082 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
5083 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
5084 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
5085 3ecf6786 Iustin Pop
                               " this instance")
5086 a8083063 Iustin Pop
5087 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
5088 a8083063 Iustin Pop
                (instance, pnode_name))
5089 a8083063 Iustin Pop
5090 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
5091 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
5092 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
5093 e557bae9 Guido Trotter
        result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
5094 4c4e4e1e Iustin Pop
        result.Raise("Could not add os for instance %s"
5095 4c4e4e1e Iustin Pop
                     " on node %s" % (instance, pnode_name))
5096 a8083063 Iustin Pop
5097 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
5098 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
5099 a8083063 Iustin Pop
        src_node = self.op.src_node
5100 09acf207 Guido Trotter
        src_images = self.src_images
5101 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
5102 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
5103 09acf207 Guido Trotter
                                                         src_node, src_images,
5104 6c0af70e Guido Trotter
                                                         cluster_name)
5105 4c4e4e1e Iustin Pop
        msg = import_result.fail_msg
5106 944bf548 Iustin Pop
        if msg:
5107 944bf548 Iustin Pop
          self.LogWarning("Error while importing the disk images for instance"
5108 944bf548 Iustin Pop
                          " %s on node %s: %s" % (instance, pnode_name, msg))
5109 a8083063 Iustin Pop
      else:
5110 a8083063 Iustin Pop
        # also checked in the prereq part
5111 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
5112 3ecf6786 Iustin Pop
                                     % self.op.mode)
5113 a8083063 Iustin Pop
5114 a8083063 Iustin Pop
    if self.op.start:
5115 4978db17 Iustin Pop
      iobj.admin_up = True
5116 4978db17 Iustin Pop
      self.cfg.Update(iobj)
5117 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
5118 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
5119 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
5120 4c4e4e1e Iustin Pop
      result.Raise("Could not start instance")
5121 a8083063 Iustin Pop
5122 08896026 Iustin Pop
    return list(iobj.all_nodes)
5123 08896026 Iustin Pop
5124 a8083063 Iustin Pop
5125 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
5126 a8083063 Iustin Pop
  """Connect to an instance's console.
5127 a8083063 Iustin Pop

5128 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
5129 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
5130 a8083063 Iustin Pop
  console.
5131 a8083063 Iustin Pop

5132 a8083063 Iustin Pop
  """
5133 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
5134 8659b73e Guido Trotter
  REQ_BGL = False
5135 8659b73e Guido Trotter
5136 8659b73e Guido Trotter
  def ExpandNames(self):
5137 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
5138 a8083063 Iustin Pop
5139 a8083063 Iustin Pop
  def CheckPrereq(self):
5140 a8083063 Iustin Pop
    """Check prerequisites.
5141 a8083063 Iustin Pop

5142 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
5143 a8083063 Iustin Pop

5144 a8083063 Iustin Pop
    """
5145 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5146 8659b73e Guido Trotter
    assert self.instance is not None, \
5147 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5148 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
5149 a8083063 Iustin Pop
5150 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5151 a8083063 Iustin Pop
    """Connect to the console of an instance
5152 a8083063 Iustin Pop

5153 a8083063 Iustin Pop
    """
5154 a8083063 Iustin Pop
    instance = self.instance
5155 a8083063 Iustin Pop
    node = instance.primary_node
5156 a8083063 Iustin Pop
5157 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
5158 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
5159 4c4e4e1e Iustin Pop
    node_insts.Raise("Can't get node information from %s" % node)
5160 a8083063 Iustin Pop
5161 aca13712 Iustin Pop
    if instance.name not in node_insts.payload:
5162 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
5163 a8083063 Iustin Pop
5164 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
5165 a8083063 Iustin Pop
5166 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
5167 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
5168 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
5169 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
5170 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
5171 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
5172 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
5173 b047857b Michael Hanselmann
5174 82122173 Iustin Pop
    # build ssh cmdline
5175 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
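    # Editor's note (not original code): the return value is the assembled ssh
    # command for connecting as root to the primary node and running the
    # hypervisor-specific console command; the caller is expected to exec it,
    # as described in the class docstring.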
5176 a8083063 Iustin Pop
5177 a8083063 Iustin Pop
5178 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
5179 a8083063 Iustin Pop
  """Replace the disks of an instance.
5180 a8083063 Iustin Pop

5181 a8083063 Iustin Pop
  """
5182 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
5183 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5184 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
5185 efd990e4 Guido Trotter
  REQ_BGL = False
5186 efd990e4 Guido Trotter
5187 7e9366f7 Iustin Pop
  def CheckArguments(self):
5188 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
5189 efd990e4 Guido Trotter
      self.op.remote_node = None
5190 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
5191 7e9366f7 Iustin Pop
      self.op.iallocator = None
5192 7e9366f7 Iustin Pop
5193 2bb5c911 Michael Hanselmann
    _DiskReplacer.CheckArguments(self.op.mode, self.op.remote_node,
5194 2bb5c911 Michael Hanselmann
                                 self.op.iallocator)
5195 7e9366f7 Iustin Pop
5196 7e9366f7 Iustin Pop
  def ExpandNames(self):
5197 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
5198 7e9366f7 Iustin Pop
5199 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
5200 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5201 2bb5c911 Michael Hanselmann
5202 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
5203 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
5204 efd990e4 Guido Trotter
      if remote_node is None:
5205 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
5206 efd990e4 Guido Trotter
                                   self.op.remote_node)
5207 2bb5c911 Michael Hanselmann
5208 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
5209 2bb5c911 Michael Hanselmann
5210 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
5211 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
5212 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
5213 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
5214 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
5215 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5216 2bb5c911 Michael Hanselmann
5217 efd990e4 Guido Trotter
    else:
5218 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
5219 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5220 efd990e4 Guido Trotter
5221 2bb5c911 Michael Hanselmann
    self.replacer = _DiskReplacer(self, self.op.instance_name, self.op.mode,
5222 2bb5c911 Michael Hanselmann
                                  self.op.iallocator, self.op.remote_node,
5223 2bb5c911 Michael Hanselmann
                                  self.op.disks)
5224 2bb5c911 Michael Hanselmann
5225 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
5226 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
5227 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
5228 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
5229 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
5230 efd990e4 Guido Trotter
      self._LockInstancesNodes()
5231 a8083063 Iustin Pop
5232 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5233 a8083063 Iustin Pop
    """Build hooks env.
5234 a8083063 Iustin Pop

5235 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
5236 a8083063 Iustin Pop

5237 a8083063 Iustin Pop
    """
5238 2bb5c911 Michael Hanselmann
    instance = self.replacer.instance
5239 a8083063 Iustin Pop
    env = {
5240 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
5241 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
5242 2bb5c911 Michael Hanselmann
      "OLD_SECONDARY": instance.secondary_nodes[0],
5243 a8083063 Iustin Pop
      }
5244 2bb5c911 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self, instance))
5245 0834c866 Iustin Pop
    nl = [
5246 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
5247 2bb5c911 Michael Hanselmann
      instance.primary_node,
5248 0834c866 Iustin Pop
      ]
5249 0834c866 Iustin Pop
    if self.op.remote_node is not None:
5250 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
5251 a8083063 Iustin Pop
    return env, nl, nl
5252 a8083063 Iustin Pop
5253 a8083063 Iustin Pop
  def CheckPrereq(self):
5254 a8083063 Iustin Pop
    """Check prerequisites.
5255 a8083063 Iustin Pop

5256 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
5257 a8083063 Iustin Pop

5258 a8083063 Iustin Pop
    """
5259 2bb5c911 Michael Hanselmann
    self.replacer.CheckPrereq()
5260 a8083063 Iustin Pop
5261 2bb5c911 Michael Hanselmann
  def Exec(self, feedback_fn):
5262 2bb5c911 Michael Hanselmann
    """Execute disk replacement.
5263 2bb5c911 Michael Hanselmann

5264 2bb5c911 Michael Hanselmann
    This dispatches the disk replacement to the appropriate handler.
5265 2bb5c911 Michael Hanselmann

5266 2bb5c911 Michael Hanselmann
    """
5267 2bb5c911 Michael Hanselmann
    self.replacer.Exec()
5268 2bb5c911 Michael Hanselmann
5269 2bb5c911 Michael Hanselmann
5270 2bb5c911 Michael Hanselmann
class _DiskReplacer:
5271 2bb5c911 Michael Hanselmann
  """Replaces disks for an instance.
5272 2bb5c911 Michael Hanselmann

5273 2bb5c911 Michael Hanselmann
  Note: Locking is not within the scope of this class.
5274 2bb5c911 Michael Hanselmann

5275 2bb5c911 Michael Hanselmann
  """
5276 2bb5c911 Michael Hanselmann
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
5277 2bb5c911 Michael Hanselmann
               disks):
5278 2bb5c911 Michael Hanselmann
    """Initializes this class.
5279 2bb5c911 Michael Hanselmann

5280 2bb5c911 Michael Hanselmann
    """
5281 2bb5c911 Michael Hanselmann
    # Parameters
5282 2bb5c911 Michael Hanselmann
    self.lu = lu
5283 2bb5c911 Michael Hanselmann
    self.instance_name = instance_name
5284 2bb5c911 Michael Hanselmann
    self.mode = mode
5285 2bb5c911 Michael Hanselmann
    self.iallocator_name = iallocator_name
5286 2bb5c911 Michael Hanselmann
    self.remote_node = remote_node
5287 2bb5c911 Michael Hanselmann
    self.disks = disks
5288 2bb5c911 Michael Hanselmann
5289 2bb5c911 Michael Hanselmann
    # Shortcuts
5290 2bb5c911 Michael Hanselmann
    self.cfg = lu.cfg
5291 2bb5c911 Michael Hanselmann
    self.rpc = lu.rpc
5292 2bb5c911 Michael Hanselmann
5293 2bb5c911 Michael Hanselmann
    # Runtime data
5294 2bb5c911 Michael Hanselmann
    self.instance = None
5295 2bb5c911 Michael Hanselmann
    self.new_node = None
5296 2bb5c911 Michael Hanselmann
    self.target_node = None
5297 2bb5c911 Michael Hanselmann
    self.other_node = None
5298 2bb5c911 Michael Hanselmann
    self.remote_node_info = None
5299 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = None
5300 2bb5c911 Michael Hanselmann
5301 2bb5c911 Michael Hanselmann
  @staticmethod
5302 2bb5c911 Michael Hanselmann
  def CheckArguments(mode, remote_node, iallocator):
5303 2bb5c911 Michael Hanselmann
    # check for valid parameter combination
5304 2bb5c911 Michael Hanselmann
    cnt = [remote_node, iallocator].count(None)
5305 2bb5c911 Michael Hanselmann
    if mode == constants.REPLACE_DISK_CHG:
5306 2bb5c911 Michael Hanselmann
      if cnt == 2:
5307 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("When changing the secondary either an"
5308 2bb5c911 Michael Hanselmann
                                   " iallocator script must be used or the"
5309 2bb5c911 Michael Hanselmann
                                   " new node given")
5310 2bb5c911 Michael Hanselmann
      elif cnt == 0:
5311 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("Give either the iallocator or the new"
5312 2bb5c911 Michael Hanselmann
                                   " secondary, not both")
5313 2bb5c911 Michael Hanselmann
    else: # not replacing the secondary
5314 2bb5c911 Michael Hanselmann
      if cnt != 2:
5315 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("The iallocator and new node options can"
5316 2bb5c911 Michael Hanselmann
                                   " be used only when changing the"
5317 2bb5c911 Michael Hanselmann
                                   " secondary node")
5318 2bb5c911 Michael Hanselmann
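    # Editor's summary (not original code) of the accepted combinations:
    #   constants.REPLACE_DISK_CHG: exactly one of remote_node / iallocator
    #   any other mode:             neither remote_node nor iallocator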
5319 2bb5c911 Michael Hanselmann
  @staticmethod
5320 2bb5c911 Michael Hanselmann
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
5321 2bb5c911 Michael Hanselmann
    """Compute a new secondary node using an IAllocator.
5322 2bb5c911 Michael Hanselmann

5323 2bb5c911 Michael Hanselmann
    """
5324 2bb5c911 Michael Hanselmann
    ial = IAllocator(lu.cfg, lu.rpc,
5325 2bb5c911 Michael Hanselmann
                     mode=constants.IALLOCATOR_MODE_RELOC,
5326 2bb5c911 Michael Hanselmann
                     name=instance_name,
5327 2bb5c911 Michael Hanselmann
                     relocate_from=relocate_from)
5328 2bb5c911 Michael Hanselmann
5329 2bb5c911 Michael Hanselmann
    ial.Run(iallocator_name)
5330 2bb5c911 Michael Hanselmann
5331 2bb5c911 Michael Hanselmann
    if not ial.success:
5332 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
5333 2bb5c911 Michael Hanselmann
                                 " %s" % (iallocator_name, ial.info))
5334 2bb5c911 Michael Hanselmann
5335 2bb5c911 Michael Hanselmann
    if len(ial.nodes) != ial.required_nodes:
5336 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5337 2bb5c911 Michael Hanselmann
                                 " of nodes (%s), required %s" %
5338 2bb5c911 Michael Hanselmann
                                 (len(ial.nodes), ial.required_nodes))
5339 2bb5c911 Michael Hanselmann
5340 2bb5c911 Michael Hanselmann
    remote_node_name = ial.nodes[0]
5341 2bb5c911 Michael Hanselmann
5342 2bb5c911 Michael Hanselmann
    lu.LogInfo("Selected new secondary for instance '%s': %s",
5343 2bb5c911 Michael Hanselmann
               instance_name, remote_node_name)
5344 2bb5c911 Michael Hanselmann
5345 2bb5c911 Michael Hanselmann
    return remote_node_name
5346 2bb5c911 Michael Hanselmann
5347 2bb5c911 Michael Hanselmann
  def CheckPrereq(self):
5348 2bb5c911 Michael Hanselmann
    """Check prerequisites.
5349 2bb5c911 Michael Hanselmann

5350 2bb5c911 Michael Hanselmann
    This checks that the instance is in the cluster.
5351 2bb5c911 Michael Hanselmann

5352 2bb5c911 Michael Hanselmann
    """
5353 2bb5c911 Michael Hanselmann
    self.instance = self.cfg.GetInstanceInfo(self.instance_name)
5354 2bb5c911 Michael Hanselmann
    assert self.instance is not None, \
5355 2bb5c911 Michael Hanselmann
      "Cannot retrieve locked instance %s" % self.instance_name
5356 2bb5c911 Michael Hanselmann
5357 2bb5c911 Michael Hanselmann
    if self.instance.disk_template != constants.DT_DRBD8:
5358 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
5359 7e9366f7 Iustin Pop
                                 " instances")
5360 a8083063 Iustin Pop
5361 2bb5c911 Michael Hanselmann
    if len(self.instance.secondary_nodes) != 1:
5362 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
5363 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
5364 2bb5c911 Michael Hanselmann
                                 len(self.instance.secondary_nodes))
5365 a8083063 Iustin Pop
5366 2bb5c911 Michael Hanselmann
    secondary_node = self.instance.secondary_nodes[0]
5367 a9e0c397 Iustin Pop
5368 2bb5c911 Michael Hanselmann
    if self.iallocator_name is None:
5369 2bb5c911 Michael Hanselmann
      remote_node = self.remote_node
5370 2bb5c911 Michael Hanselmann
    else:
5371 2bb5c911 Michael Hanselmann
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
5372 2bb5c911 Michael Hanselmann
                                       self.instance.name, secondary_node)
5373 b6e82a65 Iustin Pop
5374 a9e0c397 Iustin Pop
    if remote_node is not None:
5375 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
5376 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
5377 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
5378 a9e0c397 Iustin Pop
    else:
5379 a9e0c397 Iustin Pop
      self.remote_node_info = None
5380 2bb5c911 Michael Hanselmann
5381 2bb5c911 Michael Hanselmann
    if remote_node == self.instance.primary_node:
5382 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
5383 3ecf6786 Iustin Pop
                                 " the instance.")
5384 2bb5c911 Michael Hanselmann
5385 2bb5c911 Michael Hanselmann
    if remote_node == secondary_node:
5386 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
5387 7e9366f7 Iustin Pop
                                 " secondary node of the instance.")
5388 7e9366f7 Iustin Pop
5389 2bb5c911 Michael Hanselmann
    if self.mode == constants.REPLACE_DISK_PRI:
5390 2bb5c911 Michael Hanselmann
      self.target_node = self.instance.primary_node
5391 2bb5c911 Michael Hanselmann
      self.other_node = secondary_node
5392 2bb5c911 Michael Hanselmann
      check_nodes = [self.target_node, self.other_node]
5393 7e9366f7 Iustin Pop
5394 2bb5c911 Michael Hanselmann
    elif self.mode == constants.REPLACE_DISK_SEC:
5395 2bb5c911 Michael Hanselmann
      self.target_node = secondary_node
5396 2bb5c911 Michael Hanselmann
      self.other_node = self.instance.primary_node
5397 2bb5c911 Michael Hanselmann
      check_nodes = [self.target_node, self.other_node]
5398 a9e0c397 Iustin Pop
5399 2bb5c911 Michael Hanselmann
    elif self.mode == constants.REPLACE_DISK_CHG:
5400 2bb5c911 Michael Hanselmann
      self.new_node = remote_node
5401 2bb5c911 Michael Hanselmann
      self.other_node = self.instance.primary_node
5402 2bb5c911 Michael Hanselmann
      self.target_node = secondary_node
5403 2bb5c911 Michael Hanselmann
      check_nodes = [self.new_node, self.other_node]
5404 54155f52 Iustin Pop
5405 2bb5c911 Michael Hanselmann
      _CheckNodeNotDrained(self.lu, remote_node)
5406 a8083063 Iustin Pop
5407 2bb5c911 Michael Hanselmann
    else:
5408 2bb5c911 Michael Hanselmann
      raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
5409 2bb5c911 Michael Hanselmann
                                   self.mode)
5410 a9e0c397 Iustin Pop
5411 2bb5c911 Michael Hanselmann
    for node in check_nodes:
5412 2bb5c911 Michael Hanselmann
      _CheckNodeOnline(self.lu, node)
5413 e4376078 Iustin Pop
5414 2bb5c911 Michael Hanselmann
    # If not specified all disks should be replaced
5415 2bb5c911 Michael Hanselmann
    if not self.disks:
5416 2bb5c911 Michael Hanselmann
      self.disks = range(len(self.instance.disks))
5417 e4376078 Iustin Pop
5418 2bb5c911 Michael Hanselmann
    # Check whether disks are valid
5419 2bb5c911 Michael Hanselmann
    for disk_idx in self.disks:
5420 2bb5c911 Michael Hanselmann
      self.instance.FindDisk(disk_idx)
5421 e4376078 Iustin Pop
5422 2bb5c911 Michael Hanselmann
    # Get secondary node IP addresses
5423 2bb5c911 Michael Hanselmann
    node_2nd_ip = {}
5424 e4376078 Iustin Pop
5425 2bb5c911 Michael Hanselmann
    for node_name in [self.target_node, self.other_node, self.new_node]:
5426 2bb5c911 Michael Hanselmann
      if node_name is not None:
5427 2bb5c911 Michael Hanselmann
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
5428 e4376078 Iustin Pop
5429 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = node_2nd_ip
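    # Editor's note (not original code): node_secondary_ip maps a node name to
    # its secondary (replication) IP, e.g. {"node1": "192.0.2.10"} (example
    # values), covering only the nodes relevant for the chosen mode.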
5430 a9e0c397 Iustin Pop
5431 2bb5c911 Michael Hanselmann
  def Exec(self):
5432 2bb5c911 Michael Hanselmann
    """Execute disk replacement.
5433 2bb5c911 Michael Hanselmann

5434 2bb5c911 Michael Hanselmann
    This dispatches the disk replacement to the appropriate handler.
5435 cff90b79 Iustin Pop

5436 a9e0c397 Iustin Pop
    """
5437 2bb5c911 Michael Hanselmann
    activate_disks = (not self.instance.admin_up)
5438 2bb5c911 Michael Hanselmann
5439 2bb5c911 Michael Hanselmann
    # Activate the instance disks if we're replacing them on a down instance
5440 2bb5c911 Michael Hanselmann
    if activate_disks:
5441 2bb5c911 Michael Hanselmann
      _StartInstanceDisks(self.lu, self.instance, True)
5442 2bb5c911 Michael Hanselmann
5443 2bb5c911 Michael Hanselmann
    try:
5444 2bb5c911 Michael Hanselmann
      if self.mode == constants.REPLACE_DISK_CHG:
5445 2bb5c911 Michael Hanselmann
        return self._ExecDrbd8Secondary()
5446 2bb5c911 Michael Hanselmann
      else:
5447 2bb5c911 Michael Hanselmann
        return self._ExecDrbd8DiskOnly()
5448 2bb5c911 Michael Hanselmann
5449 2bb5c911 Michael Hanselmann
    finally:
5450 2bb5c911 Michael Hanselmann
      # Deactivate the instance disks if we're replacing them on a down instance
5451 2bb5c911 Michael Hanselmann
      if activate_disks:
5452 2bb5c911 Michael Hanselmann
        _SafeShutdownInstanceDisks(self.lu, self.instance)
5453 2bb5c911 Michael Hanselmann
5454 2bb5c911 Michael Hanselmann
  def _CheckVolumeGroup(self, nodes):
5455 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Checking volume groups")
5456 2bb5c911 Michael Hanselmann
5457 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
5458 cff90b79 Iustin Pop
5459 2bb5c911 Michael Hanselmann
    # Make sure volume group exists on all involved nodes
5460 2bb5c911 Michael Hanselmann
    results = self.rpc.call_vg_list(nodes)
5461 cff90b79 Iustin Pop
    if not results:
5462 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
5463 2bb5c911 Michael Hanselmann
5464 2bb5c911 Michael Hanselmann
    for node in nodes:
5465 781de953 Iustin Pop
      res = results[node]
5466 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
5467 2bb5c911 Michael Hanselmann
      if vgname not in res.payload:
5468 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
5469 2bb5c911 Michael Hanselmann
                                 (vgname, node))
5470 2bb5c911 Michael Hanselmann
5471 2bb5c911 Michael Hanselmann
  def _CheckDisksExistence(self, nodes):
5472 2bb5c911 Michael Hanselmann
    # Check disk existence
5473 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
5474 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
5475 cff90b79 Iustin Pop
        continue
5476 2bb5c911 Michael Hanselmann
5477 2bb5c911 Michael Hanselmann
      for node in nodes:
5478 2bb5c911 Michael Hanselmann
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
5479 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(dev, node)
5480 2bb5c911 Michael Hanselmann
5481 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
5482 2bb5c911 Michael Hanselmann
5483 4c4e4e1e Iustin Pop
        msg = result.fail_msg
5484 2bb5c911 Michael Hanselmann
        if msg or not result.payload:
5485 2bb5c911 Michael Hanselmann
          if not msg:
5486 2bb5c911 Michael Hanselmann
            msg = "disk not found"
5487 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5488 23829f6f Iustin Pop
                                   (idx, node, msg))
5489 cff90b79 Iustin Pop
5490 2bb5c911 Michael Hanselmann
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
5491 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
5492 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
5493 cff90b79 Iustin Pop
        continue
5494 cff90b79 Iustin Pop
5495 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
5496 2bb5c911 Michael Hanselmann
                      (idx, node_name))
5497 2bb5c911 Michael Hanselmann
5498 2bb5c911 Michael Hanselmann
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
5499 2bb5c911 Michael Hanselmann
                                   ldisk=ldisk):
5500 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
5501 2bb5c911 Michael Hanselmann
                                 " replace disks for instance %s" %
5502 2bb5c911 Michael Hanselmann
                                 (node_name, self.instance.name))
5503 2bb5c911 Michael Hanselmann
5504 2bb5c911 Michael Hanselmann
  def _CreateNewStorage(self, node_name):
5505 2bb5c911 Michael Hanselmann
    vgname = self.cfg.GetVGName()
5506 2bb5c911 Michael Hanselmann
    iv_names = {}
5507 2bb5c911 Michael Hanselmann
5508 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
5509 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
5510 a9e0c397 Iustin Pop
        continue
5511 2bb5c911 Michael Hanselmann
5512 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
5513 2bb5c911 Michael Hanselmann
5514 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
5515 2bb5c911 Michael Hanselmann
5516 2bb5c911 Michael Hanselmann
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
5517 2bb5c911 Michael Hanselmann
      names = _GenerateUniqueNames(self.lu, lv_names)
5518 2bb5c911 Michael Hanselmann
5519 2bb5c911 Michael Hanselmann
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
5520 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
5521 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5522 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
5523 2bb5c911 Michael Hanselmann
5524 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
5525 a9e0c397 Iustin Pop
      old_lvs = dev.children
5526 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
5527 2bb5c911 Michael Hanselmann
5528 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
5529 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
5530 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
5531 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
5532 2bb5c911 Michael Hanselmann
5533 2bb5c911 Michael Hanselmann
    return iv_names
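    # Editor's note (not original code): iv_names maps the DRBD disk's iv_name
    # (e.g. "disk/0") to (drbd_dev, old_lvs, new_lvs); the new LVs reuse the
    # ".disk<N>_data"/".disk<N>_meta" suffixes with a fresh unique prefix from
    # _GenerateUniqueNames, and the meta LV is fixed at 128 MiB.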
5534 2bb5c911 Michael Hanselmann
5535 2bb5c911 Michael Hanselmann
  def _CheckDevices(self, node_name, iv_names):
5536 2bb5c911 Michael Hanselmann
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5537 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
5538 2bb5c911 Michael Hanselmann
5539 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_find(node_name, dev)
5540 2bb5c911 Michael Hanselmann
5541 2bb5c911 Michael Hanselmann
      msg = result.fail_msg
5542 2bb5c911 Michael Hanselmann
      if msg or not result.payload:
5543 2bb5c911 Michael Hanselmann
        if not msg:
5544 2bb5c911 Michael Hanselmann
          msg = "disk not found"
5545 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
5546 2bb5c911 Michael Hanselmann
                                 (name, msg))
5547 2bb5c911 Michael Hanselmann
5548 2bb5c911 Michael Hanselmann
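      # Editor's note (assumption, not original code): index 5 of the
      # blockdev_find payload is taken here to be the "is degraded" flag of the
      # device status tuple; see the rpc/blockdev definitions for the layout.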
      if result.payload[5]:
5549 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
5550 2bb5c911 Michael Hanselmann
5551 2bb5c911 Michael Hanselmann
  def _RemoveOldStorage(self, node_name, iv_names):
5552 2bb5c911 Michael Hanselmann
    for name, (dev, old_lvs, _) in iv_names.iteritems():
5553 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Remove logical volumes for %s" % name)
5554 2bb5c911 Michael Hanselmann
5555 2bb5c911 Michael Hanselmann
      for lv in old_lvs:
5556 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(lv, node_name)
5557 2bb5c911 Michael Hanselmann
5558 2bb5c911 Michael Hanselmann
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
5559 2bb5c911 Michael Hanselmann
        if msg:
5560 2bb5c911 Michael Hanselmann
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
5561 2bb5c911 Michael Hanselmann
                             hint="remove unused LVs manually")
5562 2bb5c911 Michael Hanselmann
5563 2bb5c911 Michael Hanselmann
  def _ExecDrbd8DiskOnly(self):
5564 2bb5c911 Michael Hanselmann
    """Replace a disk on the primary or secondary for DRBD 8.
5565 2bb5c911 Michael Hanselmann

5566 2bb5c911 Michael Hanselmann
    The algorithm for replace is quite complicated:
5567 2bb5c911 Michael Hanselmann

5568 2bb5c911 Michael Hanselmann
      1. for each disk to be replaced:
5569 2bb5c911 Michael Hanselmann

5570 2bb5c911 Michael Hanselmann
        1. create new LVs on the target node with unique names
5571 2bb5c911 Michael Hanselmann
        1. detach old LVs from the drbd device
5572 2bb5c911 Michael Hanselmann
        1. rename old LVs to name_replaced.<time_t>
5573 2bb5c911 Michael Hanselmann
        1. rename new LVs to old LVs
5574 2bb5c911 Michael Hanselmann
        1. attach the new LVs (with the old names now) to the drbd device
5575 2bb5c911 Michael Hanselmann

5576 2bb5c911 Michael Hanselmann
      1. wait for sync across all devices
5577 2bb5c911 Michael Hanselmann

5578 2bb5c911 Michael Hanselmann
      1. for each modified disk:
5579 2bb5c911 Michael Hanselmann

5580 2bb5c911 Michael Hanselmann
        1. remove old LVs (which have the name name_replaced.<time_t>)
5581 2bb5c911 Michael Hanselmann

5582 2bb5c911 Michael Hanselmann
    Failures are not very well handled.
5583 2bb5c911 Michael Hanselmann

5584 2bb5c911 Michael Hanselmann
    """
5585 2bb5c911 Michael Hanselmann
    steps_total = 6
5586 2bb5c911 Michael Hanselmann
5587 2bb5c911 Michael Hanselmann
    # Step: check device activation
5588 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
5589 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.other_node, self.target_node])
5590 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.target_node, self.other_node])
5591 2bb5c911 Michael Hanselmann
5592 2bb5c911 Michael Hanselmann
    # Step: check other node consistency
5593 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
5594 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.other_node,
5595 2bb5c911 Michael Hanselmann
                                self.other_node == self.instance.primary_node,
5596 2bb5c911 Michael Hanselmann
                                False)
5597 2bb5c911 Michael Hanselmann
5598 2bb5c911 Michael Hanselmann
    # Step: create new storage
5599 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
5600 2bb5c911 Michael Hanselmann
    iv_names = self._CreateNewStorage(self.target_node)
5601 a9e0c397 Iustin Pop
5602 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
5603 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
5604 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
5605 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
5606 2bb5c911 Michael Hanselmann
5607 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
                                                     old_lvs)
5608 4c4e4e1e Iustin Pop
      result.Raise("Can't detach drbd from local storage on node"
5609 2bb5c911 Michael Hanselmann
                   " %s for device %s" % (self.target_node, dev.iv_name))
5610 cff90b79 Iustin Pop
      #dev.children = []
5611 cff90b79 Iustin Pop
      #cfg.Update(instance)
5612 a9e0c397 Iustin Pop
5613 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
5614 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
5615 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
5616 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
5617 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
5618 cff90b79 Iustin Pop
5619 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
5620 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
5621 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
5622 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
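      # e.g. a physical_id of ("xenvg", "<uuid>.disk0_data") is renamed to
      # ("xenvg", "<uuid>.disk0_data_replaced-<time_t>"); the names here
      # are illustrative only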
5623 2bb5c911 Michael Hanselmann
5624 2bb5c911 Michael Hanselmann
      # Build the rename list based on what LVs exist on the node
5625 2bb5c911 Michael Hanselmann
      rename_old_to_new = []
5626 cff90b79 Iustin Pop
      for to_ren in old_lvs:
5627 2bb5c911 Michael Hanselmann
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
5628 4c4e4e1e Iustin Pop
        if not result.fail_msg and result.payload:
5629 23829f6f Iustin Pop
          # device exists
5630 2bb5c911 Michael Hanselmann
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
5631 cff90b79 Iustin Pop
5632 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the old LVs on the target node")
5633 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_old_to_new)
5634 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
5635 2bb5c911 Michael Hanselmann
5636 2bb5c911 Michael Hanselmann
      # Now we rename the new LVs to the old LVs
5637 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the new LVs on the target node")
5638 2bb5c911 Michael Hanselmann
      rename_new_to_old = [(new, old.physical_id)
5639 2bb5c911 Michael Hanselmann
                           for old, new in zip(old_lvs, new_lvs)]
5640 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_new_to_old)
5641 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
5642 cff90b79 Iustin Pop
5643 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
5644 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
5645 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(new, self.target_node)
5646 a9e0c397 Iustin Pop
5647 cff90b79 Iustin Pop
      for disk in old_lvs:
5648 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
5649 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(disk, self.target_node)
5650 a9e0c397 Iustin Pop
5651 2bb5c911 Michael Hanselmann
      # Now that the new lvs have the old name, we can add them to the device
5652 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
5653 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
                                                  new_lvs)
5654 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5655 2cc1da8b Iustin Pop
      if msg:
5656 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
5657 2bb5c911 Michael Hanselmann
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
                                               new_lv).fail_msg
5658 4c4e4e1e Iustin Pop
          if msg2:
5659 2bb5c911 Michael Hanselmann
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
5660 2bb5c911 Michael Hanselmann
                               hint=("cleanup manually the unused logical"
5661 2bb5c911 Michael Hanselmann
                                     "volumes"))
5662 2cc1da8b Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
5663 a9e0c397 Iustin Pop
5664 a9e0c397 Iustin Pop
      dev.children = new_lvs
5665 a9e0c397 Iustin Pop
5666 2bb5c911 Michael Hanselmann
      self.cfg.Update(self.instance)
5667 a9e0c397 Iustin Pop
5668 2bb5c911 Michael Hanselmann
    # Wait for sync
5669 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
5670 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
5671 2bb5c911 Michael Hanselmann
    self.lu.LogStep(5, steps_total, "Sync devices")
5672 2bb5c911 Michael Hanselmann
    _WaitForSync(self.lu, self.instance, unlock=True)
5673 a9e0c397 Iustin Pop
5674 2bb5c911 Michael Hanselmann
    # Check all devices manually
5675 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
5676 a9e0c397 Iustin Pop
5677 cff90b79 Iustin Pop
    # Step: remove old storage
5678 2bb5c911 Michael Hanselmann
    self.lu.LogStep(6, steps_total, "Removing old storage")
5679 2bb5c911 Michael Hanselmann
    self._RemoveOldStorage(self.target_node, iv_names)
5680 a9e0c397 Iustin Pop
5681 2bb5c911 Michael Hanselmann
  def _ExecDrbd8Secondary(self):
5682 2bb5c911 Michael Hanselmann
    """Replace the secondary node for DRBD 8.
5683 a9e0c397 Iustin Pop

5684 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5685 a9e0c397 Iustin Pop
      - for all disks of the instance:
5686 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
5687 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
5688 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
5689 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
5690 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
5691 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
5692 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
5693 a9e0c397 Iustin Pop
          not network enabled
5694 a9e0c397 Iustin Pop
      - wait for sync across all devices
5695 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
5696 a9e0c397 Iustin Pop

5697 a9e0c397 Iustin Pop
    Failures are not very well handled.
5698 0834c866 Iustin Pop

5699 a9e0c397 Iustin Pop
    """
5700 0834c866 Iustin Pop
    steps_total = 6
5701 0834c866 Iustin Pop
5702 0834c866 Iustin Pop
    # Step: check device activation
5703 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
5704 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.instance.primary_node])
5705 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.instance.primary_node])
5706 0834c866 Iustin Pop
5707 0834c866 Iustin Pop
    # Step: check other node consistency
5708 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
5709 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
5710 0834c866 Iustin Pop
5711 0834c866 Iustin Pop
    # Step: create new storage
5712 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
5713 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
5714 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
5715 2bb5c911 Michael Hanselmann
                      (self.new_node, idx))
5716 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
5717 a9e0c397 Iustin Pop
      for new_lv in dev.children:
5718 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
5719 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
5720 a9e0c397 Iustin Pop
5721 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
5722 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
5723 a1578d63 Iustin Pop
    # error and the success paths
5724 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
5725 2bb5c911 Michael Hanselmann
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
5726 2bb5c911 Michael Hanselmann
                                        self.instance.name)
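    # one new minor is reserved on the new node for each instance disk,
    # hence new_node is repeated once per disk in the list above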
5727 2bb5c911 Michael Hanselmann
    logging.debug("Allocated minors %r" % (minors,))
5728 2bb5c911 Michael Hanselmann
5729 2bb5c911 Michael Hanselmann
    iv_names = {}
5730 2bb5c911 Michael Hanselmann
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
5731 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" % (self.new_node, idx))
5732 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
5733 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
5734 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
5735 a2d59d8b Iustin Pop
      # with network, for the latter activation in step 4
5736 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5737 2bb5c911 Michael Hanselmann
      if self.instance.primary_node == o_node1:
5738 a2d59d8b Iustin Pop
        p_minor = o_minor1
5739 ffa1c0dc Iustin Pop
      else:
5740 a2d59d8b Iustin Pop
        p_minor = o_minor2
5741 a2d59d8b Iustin Pop
5742 2bb5c911 Michael Hanselmann
      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
5743 2bb5c911 Michael Hanselmann
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)
5744 a2d59d8b Iustin Pop
5745 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
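      # map the disk index to (drbd device, its LV children, the new
      # network-enabled logical_id); the latter is written to the config
      # once the old secondary has been detached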
5746 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5747 a2d59d8b Iustin Pop
                    new_net_id)
5748 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5749 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
5750 8a6c7011 Iustin Pop
                              children=dev.children,
5751 8a6c7011 Iustin Pop
                              size=dev.size)
5752 796cab27 Iustin Pop
      try:
5753 2bb5c911 Michael Hanselmann
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
5754 2bb5c911 Michael Hanselmann
                              _GetInstanceInfoText(self.instance), False)
5755 82759cb1 Iustin Pop
      except errors.GenericError:
5756 2bb5c911 Michael Hanselmann
        self.cfg.ReleaseDRBDMinors(self.instance.name)
5757 796cab27 Iustin Pop
        raise
5758 a9e0c397 Iustin Pop
5759 2bb5c911 Michael Hanselmann
    # We have new devices, shut down the drbd on the old secondary
5760 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
5761 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
5762 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.target_node)
5763 2bb5c911 Michael Hanselmann
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
5764 cacfd1fd Iustin Pop
      if msg:
5765 2bb5c911 Michael Hanselmann
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
5766 2bb5c911 Michael Hanselmann
                           "node: %s" % (idx, msg),
5767 2bb5c911 Michael Hanselmann
                           hint=("Please cleanup this device manually as"
5768 2bb5c911 Michael Hanselmann
                                 " soon as possible"))
5769 a9e0c397 Iustin Pop
5770 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
5771 2bb5c911 Michael Hanselmann
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
                                               self.node_secondary_ip,
5772 2bb5c911 Michael Hanselmann
                                               self.instance.disks)[
                                                 self.instance.primary_node]
5773 642445d9 Iustin Pop
5774 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5775 a2d59d8b Iustin Pop
    if msg:
5776 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
5777 2bb5c911 Michael Hanselmann
      self.cfg.ReleaseDRBDMinors(self.instance.name)
5778 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
5779 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
5780 642445d9 Iustin Pop
5781 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
5782 642445d9 Iustin Pop
    # the instance to point to the new secondary
5783 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Updating instance configuration")
5784 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
5785 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
5786 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.instance.primary_node)
5787 2bb5c911 Michael Hanselmann
5788 2bb5c911 Michael Hanselmann
    self.cfg.Update(self.instance)
5789 a9e0c397 Iustin Pop
5790 642445d9 Iustin Pop
    # and now perform the drbd attach
5791 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Attaching primary drbds to new secondary"
5792 2bb5c911 Michael Hanselmann
                    " (standalone => connected)")
5793 2bb5c911 Michael Hanselmann
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
5794 2bb5c911 Michael Hanselmann
                                           self.instance.disks,
                                           self.instance.name,
5795 a2d59d8b Iustin Pop
                                           False)
5796 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
5797 4c4e4e1e Iustin Pop
      msg = to_result.fail_msg
5798 a2d59d8b Iustin Pop
      if msg:
5799 2bb5c911 Michael Hanselmann
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s", to_node, msg,
5800 2bb5c911 Michael Hanselmann
                           hint=("please do a gnt-instance info to see the"
5801 2bb5c911 Michael Hanselmann
                                 " status of disks"))
5802 a9e0c397 Iustin Pop
5803 2bb5c911 Michael Hanselmann
    # Wait for sync
5804 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
5805 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
5806 2bb5c911 Michael Hanselmann
    self.lu.LogStep(5, steps_total, "Sync devices")
5807 2bb5c911 Michael Hanselmann
    _WaitForSync(self.lu, self.instance, unlock=True)
5808 a9e0c397 Iustin Pop
5809 2bb5c911 Michael Hanselmann
    # Check all devices manually
5810 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
5811 22985314 Guido Trotter
5812 2bb5c911 Michael Hanselmann
    # Step: remove old storage
5813 2bb5c911 Michael Hanselmann
    self.lu.LogStep(6, steps_total, "Removing old storage")
5814 2bb5c911 Michael Hanselmann
    self._RemoveOldStorage(self.target_node, iv_names)
5815 a9e0c397 Iustin Pop
5816 a8083063 Iustin Pop
5817 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
5818 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
5819 8729e0d7 Iustin Pop

5820 8729e0d7 Iustin Pop
  """
5821 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
5822 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5823 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
5824 31e63dbf Guido Trotter
  REQ_BGL = False
5825 31e63dbf Guido Trotter
5826 31e63dbf Guido Trotter
  def ExpandNames(self):
5827 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
5828 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
5829 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5830 31e63dbf Guido Trotter
5831 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
5832 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
5833 31e63dbf Guido Trotter
      self._LockInstancesNodes()
5834 8729e0d7 Iustin Pop
5835 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
5836 8729e0d7 Iustin Pop
    """Build hooks env.
5837 8729e0d7 Iustin Pop

5838 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
5839 8729e0d7 Iustin Pop

5840 8729e0d7 Iustin Pop
    """
5841 8729e0d7 Iustin Pop
    env = {
5842 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
5843 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
5844 8729e0d7 Iustin Pop
      }
5845 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5846 8729e0d7 Iustin Pop
    nl = [
5847 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
5848 8729e0d7 Iustin Pop
      self.instance.primary_node,
5849 8729e0d7 Iustin Pop
      ]
5850 8729e0d7 Iustin Pop
    return env, nl, nl
5851 8729e0d7 Iustin Pop
5852 8729e0d7 Iustin Pop
  def CheckPrereq(self):
5853 8729e0d7 Iustin Pop
    """Check prerequisites.
5854 8729e0d7 Iustin Pop

5855 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
5856 8729e0d7 Iustin Pop

5857 8729e0d7 Iustin Pop
    """
5858 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5859 31e63dbf Guido Trotter
    assert instance is not None, \
5860 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5861 6b12959c Iustin Pop
    nodenames = list(instance.all_nodes)
5862 6b12959c Iustin Pop
    for node in nodenames:
5863 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, node)
5864 7527a8a4 Iustin Pop
5865 31e63dbf Guido Trotter
5866 8729e0d7 Iustin Pop
    self.instance = instance
5867 8729e0d7 Iustin Pop
5868 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
5869 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
5870 8729e0d7 Iustin Pop
                                 " growing.")
5871 8729e0d7 Iustin Pop
5872 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
5873 8729e0d7 Iustin Pop
5874 72737a7f Iustin Pop
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
5875 72737a7f Iustin Pop
                                       instance.hypervisor)
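    # node_info reports, among other things, the free space in the volume
    # group ('vg_free', in MiB); the requested growth must fit on every
    # node holding a copy of the disk (primary and, for DRBD, secondary)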
5876 8729e0d7 Iustin Pop
    for node in nodenames:
5877 781de953 Iustin Pop
      info = nodeinfo[node]
5878 4c4e4e1e Iustin Pop
      info.Raise("Cannot get current information from node %s" % node)
5879 070e998b Iustin Pop
      vg_free = info.payload.get('vg_free', None)
5880 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
5881 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
5882 8729e0d7 Iustin Pop
                                   " node %s" % node)
5883 781de953 Iustin Pop
      if self.op.amount > vg_free:
5884 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
5885 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
5886 781de953 Iustin Pop
                                   (node, vg_free, self.op.amount))
5887 8729e0d7 Iustin Pop
5888 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
5889 8729e0d7 Iustin Pop
    """Execute disk grow.
5890 8729e0d7 Iustin Pop

5891 8729e0d7 Iustin Pop
    """
5892 8729e0d7 Iustin Pop
    instance = self.instance
5893 ad24e046 Iustin Pop
    disk = self.disk
5894 6b12959c Iustin Pop
    for node in instance.all_nodes:
5895 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
5896 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
5897 4c4e4e1e Iustin Pop
      result.Raise("Grow request failed to node %s" % node)
5898 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
5899 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
5900 6605411d Iustin Pop
    if self.op.wait_for_sync:
5901 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
5902 6605411d Iustin Pop
      if disk_abort:
5903 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
5904 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
5905 8729e0d7 Iustin Pop
5906 8729e0d7 Iustin Pop
5907 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
5908 a8083063 Iustin Pop
  """Query runtime instance data.
5909 a8083063 Iustin Pop

5910 a8083063 Iustin Pop
  """
5911 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
5912 a987fa48 Guido Trotter
  REQ_BGL = False
5913 ae5849b5 Michael Hanselmann
5914 a987fa48 Guido Trotter
  def ExpandNames(self):
5915 a987fa48 Guido Trotter
    self.needed_locks = {}
5916 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
5917 a987fa48 Guido Trotter
5918 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
5919 a987fa48 Guido Trotter
      raise errors.OpPrereqError("Invalid argument type 'instances'")
5920 a987fa48 Guido Trotter
5921 a987fa48 Guido Trotter
    if self.op.instances:
5922 a987fa48 Guido Trotter
      self.wanted_names = []
5923 a987fa48 Guido Trotter
      for name in self.op.instances:
5924 a987fa48 Guido Trotter
        full_name = self.cfg.ExpandInstanceName(name)
5925 a987fa48 Guido Trotter
        if full_name is None:
5926 f57c76e4 Iustin Pop
          raise errors.OpPrereqError("Instance '%s' not known" % name)
5927 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
5928 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
5929 a987fa48 Guido Trotter
    else:
5930 a987fa48 Guido Trotter
      self.wanted_names = None
5931 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
5932 a987fa48 Guido Trotter
5933 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
5934 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5935 a987fa48 Guido Trotter
5936 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
5937 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
5938 a987fa48 Guido Trotter
      self._LockInstancesNodes()
5939 a8083063 Iustin Pop
5940 a8083063 Iustin Pop
  def CheckPrereq(self):
5941 a8083063 Iustin Pop
    """Check prerequisites.
5942 a8083063 Iustin Pop

5943 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
5944 a8083063 Iustin Pop

5945 a8083063 Iustin Pop
    """
5946 a987fa48 Guido Trotter
    if self.wanted_names is None:
5947 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5948 a8083063 Iustin Pop
5949 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
5950 a987fa48 Guido Trotter
                             in self.wanted_names]
5951 a987fa48 Guido Trotter
    return
5952 a8083063 Iustin Pop
5953 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
5954 a8083063 Iustin Pop
    """Compute block device status.
5955 a8083063 Iustin Pop

5956 a8083063 Iustin Pop
    """
5957 57821cac Iustin Pop
    static = self.op.static
5958 57821cac Iustin Pop
    if not static:
5959 57821cac Iustin Pop
      self.cfg.SetDiskID(dev, instance.primary_node)
5960 57821cac Iustin Pop
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
5961 9854f5d0 Iustin Pop
      if dev_pstatus.offline:
5962 9854f5d0 Iustin Pop
        dev_pstatus = None
5963 9854f5d0 Iustin Pop
      else:
5964 4c4e4e1e Iustin Pop
        dev_pstatus.Raise("Can't compute disk status for %s" % instance.name)
5965 9854f5d0 Iustin Pop
        dev_pstatus = dev_pstatus.payload
5966 57821cac Iustin Pop
    else:
5967 57821cac Iustin Pop
      dev_pstatus = None
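    # dev_pstatus (and dev_sstatus below) hold the raw status returned by
    # blockdev_find on the primary/secondary node, or None in static mode
    # or when the node is marked offline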
5968 57821cac Iustin Pop
5969 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
5970 a8083063 Iustin Pop
      # in the DRBD case we derive the secondary node from the logical_id
      # (otherwise we use the one passed in)
5971 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
5972 a8083063 Iustin Pop
        snode = dev.logical_id[1]
5973 a8083063 Iustin Pop
      else:
5974 a8083063 Iustin Pop
        snode = dev.logical_id[0]
5975 a8083063 Iustin Pop
5976 57821cac Iustin Pop
    if snode and not static:
5977 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
5978 72737a7f Iustin Pop
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
5979 9854f5d0 Iustin Pop
      if dev_sstatus.offline:
5980 9854f5d0 Iustin Pop
        dev_sstatus = None
5981 9854f5d0 Iustin Pop
      else:
5982 4c4e4e1e Iustin Pop
        dev_sstatus.Raise("Can't compute disk status for %s" % instance.name)
5983 9854f5d0 Iustin Pop
        dev_sstatus = dev_sstatus.payload
5984 a8083063 Iustin Pop
    else:
5985 a8083063 Iustin Pop
      dev_sstatus = None
5986 a8083063 Iustin Pop
5987 a8083063 Iustin Pop
    if dev.children:
5988 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
5989 a8083063 Iustin Pop
                      for child in dev.children]
5990 a8083063 Iustin Pop
    else:
5991 a8083063 Iustin Pop
      dev_children = []
5992 a8083063 Iustin Pop
5993 a8083063 Iustin Pop
    data = {
5994 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
5995 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
5996 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
5997 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
5998 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
5999 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
6000 a8083063 Iustin Pop
      "children": dev_children,
6001 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
6002 c98162a7 Iustin Pop
      "size": dev.size,
6003 a8083063 Iustin Pop
      }
6004 a8083063 Iustin Pop
6005 a8083063 Iustin Pop
    return data
6006 a8083063 Iustin Pop
6007 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6008 a8083063 Iustin Pop
    """Gather and return data"""
6009 a8083063 Iustin Pop
    result = {}
6010 338e51e8 Iustin Pop
6011 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
6012 338e51e8 Iustin Pop
6013 a8083063 Iustin Pop
    for instance in self.wanted_instances:
6014 57821cac Iustin Pop
      if not self.op.static:
6015 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
6016 57821cac Iustin Pop
                                                  instance.name,
6017 57821cac Iustin Pop
                                                  instance.hypervisor)
6018 4c4e4e1e Iustin Pop
        remote_info.Raise("Error checking node %s" % instance.primary_node)
6019 7ad1af4a Iustin Pop
        remote_info = remote_info.payload
6020 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
6021 57821cac Iustin Pop
          remote_state = "up"
6022 57821cac Iustin Pop
        else:
6023 57821cac Iustin Pop
          remote_state = "down"
6024 a8083063 Iustin Pop
      else:
6025 57821cac Iustin Pop
        remote_state = None
6026 0d68c45d Iustin Pop
      if instance.admin_up:
6027 a8083063 Iustin Pop
        config_state = "up"
6028 0d68c45d Iustin Pop
      else:
6029 0d68c45d Iustin Pop
        config_state = "down"
6030 a8083063 Iustin Pop
6031 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
6032 a8083063 Iustin Pop
               for device in instance.disks]
6033 a8083063 Iustin Pop
6034 a8083063 Iustin Pop
      idict = {
6035 a8083063 Iustin Pop
        "name": instance.name,
6036 a8083063 Iustin Pop
        "config_state": config_state,
6037 a8083063 Iustin Pop
        "run_state": remote_state,
6038 a8083063 Iustin Pop
        "pnode": instance.primary_node,
6039 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
6040 a8083063 Iustin Pop
        "os": instance.os,
6041 0b13832c Guido Trotter
        # this happens to be the same format used for hooks
6042 0b13832c Guido Trotter
        "nics": _NICListToTuple(self, instance.nics),
6043 a8083063 Iustin Pop
        "disks": disks,
6044 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
6045 24838135 Iustin Pop
        "network_port": instance.network_port,
6046 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
6047 338e51e8 Iustin Pop
        "hv_actual": cluster.FillHV(instance),
6048 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
6049 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
6050 a8083063 Iustin Pop
        }
6051 a8083063 Iustin Pop
6052 a8083063 Iustin Pop
      result[instance.name] = idict
6053 a8083063 Iustin Pop
6054 a8083063 Iustin Pop
    return result
6055 a8083063 Iustin Pop
6056 a8083063 Iustin Pop
6057 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
6058 a8083063 Iustin Pop
  """Modifies an instances's parameters.
6059 a8083063 Iustin Pop

6060 a8083063 Iustin Pop
  """
6061 a8083063 Iustin Pop
  HPATH = "instance-modify"
6062 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6063 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
6064 1a5c7281 Guido Trotter
  REQ_BGL = False
6065 1a5c7281 Guido Trotter
6066 24991749 Iustin Pop
  def CheckArguments(self):
6067 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
6068 24991749 Iustin Pop
      self.op.nics = []
6069 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
6070 24991749 Iustin Pop
      self.op.disks = []
6071 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
6072 24991749 Iustin Pop
      self.op.beparams = {}
6073 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
6074 24991749 Iustin Pop
      self.op.hvparams = {}
6075 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
6076 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
6077 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
6078 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
6079 24991749 Iustin Pop
6080 24991749 Iustin Pop
    # Disk validation
6081 24991749 Iustin Pop
    disk_addremove = 0
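    # each entry of self.op.disks (and of self.op.nics below) is an
    # (operation, parameters) pair, where the operation is either
    # constants.DDM_ADD, constants.DDM_REMOVE or the index of an existing
    # device to modify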
6082 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6083 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6084 24991749 Iustin Pop
        disk_addremove += 1
6085 24991749 Iustin Pop
        continue
6086 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
6087 24991749 Iustin Pop
        disk_addremove += 1
6088 24991749 Iustin Pop
      else:
6089 24991749 Iustin Pop
        if not isinstance(disk_op, int):
6090 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
6091 8b46606c Guido Trotter
        if not isinstance(disk_dict, dict):
6092 8b46606c Guido Trotter
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
6093 8b46606c Guido Trotter
          raise errors.OpPrereqError(msg)
6094 8b46606c Guido Trotter
6095 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
6096 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
6097 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
6098 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
6099 24991749 Iustin Pop
        size = disk_dict.get('size', None)
6100 24991749 Iustin Pop
        if size is None:
6101 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
6102 24991749 Iustin Pop
        try:
6103 24991749 Iustin Pop
          size = int(size)
6104 24991749 Iustin Pop
        except ValueError, err:
6105 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
6106 24991749 Iustin Pop
                                     str(err))
6107 24991749 Iustin Pop
        disk_dict['size'] = size
6108 24991749 Iustin Pop
      else:
6109 24991749 Iustin Pop
        # modification of disk
6110 24991749 Iustin Pop
        if 'size' in disk_dict:
6111 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
6112 24991749 Iustin Pop
                                     " grow-disk")
6113 24991749 Iustin Pop
6114 24991749 Iustin Pop
    if disk_addremove > 1:
6115 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
6116 24991749 Iustin Pop
                                 " supported at a time")
6117 24991749 Iustin Pop
6118 24991749 Iustin Pop
    # NIC validation
6119 24991749 Iustin Pop
    nic_addremove = 0
6120 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6121 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6122 24991749 Iustin Pop
        nic_addremove += 1
6123 24991749 Iustin Pop
        continue
6124 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
6125 24991749 Iustin Pop
        nic_addremove += 1
6126 24991749 Iustin Pop
      else:
6127 24991749 Iustin Pop
        if not isinstance(nic_op, int):
6128 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
6129 8b46606c Guido Trotter
        if not isinstance(nic_dict, dict):
6130 8b46606c Guido Trotter
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
6131 8b46606c Guido Trotter
          raise errors.OpPrereqError(msg)
6132 24991749 Iustin Pop
6133 24991749 Iustin Pop
      # nic_dict should be a dict
6134 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
6135 24991749 Iustin Pop
      if nic_ip is not None:
6136 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
6137 24991749 Iustin Pop
          nic_dict['ip'] = None
6138 24991749 Iustin Pop
        else:
6139 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
6140 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
6141 5c44da6a Guido Trotter
6142 cd098c41 Guido Trotter
      nic_bridge = nic_dict.get('bridge', None)
6143 cd098c41 Guido Trotter
      nic_link = nic_dict.get('link', None)
6144 cd098c41 Guido Trotter
      if nic_bridge and nic_link:
6145 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
6146 29921401 Iustin Pop
                                   " at the same time")
6147 cd098c41 Guido Trotter
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
6148 cd098c41 Guido Trotter
        nic_dict['bridge'] = None
6149 cd098c41 Guido Trotter
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
6150 cd098c41 Guido Trotter
        nic_dict['link'] = None
6151 cd098c41 Guido Trotter
6152 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
6153 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
6154 5c44da6a Guido Trotter
        if nic_mac is None:
6155 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
6156 5c44da6a Guido Trotter
6157 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6158 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6159 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6160 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
6161 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
6162 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
6163 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
6164 5c44da6a Guido Trotter
                                     " modifying an existing nic")
6165 5c44da6a Guido Trotter
6166 24991749 Iustin Pop
    if nic_addremove > 1:
6167 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
6168 24991749 Iustin Pop
                                 " supported at a time")
6169 24991749 Iustin Pop
6170 1a5c7281 Guido Trotter
  def ExpandNames(self):
6171 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
6172 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
6173 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6174 74409b12 Iustin Pop
6175 74409b12 Iustin Pop
  def DeclareLocks(self, level):
6176 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
6177 74409b12 Iustin Pop
      self._LockInstancesNodes()
6178 a8083063 Iustin Pop
6179 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6180 a8083063 Iustin Pop
    """Build hooks env.
6181 a8083063 Iustin Pop

6182 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
6183 a8083063 Iustin Pop

6184 a8083063 Iustin Pop
    """
6185 396e1b78 Michael Hanselmann
    args = dict()
6186 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
6187 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
6188 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
6189 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
6190 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
6191 d8dcf3c9 Guido Trotter
    # information at all.
6192 d8dcf3c9 Guido Trotter
    if self.op.nics:
6193 d8dcf3c9 Guido Trotter
      args['nics'] = []
6194 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
6195 62f0dd02 Guido Trotter
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
6196 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
6197 d8dcf3c9 Guido Trotter
        if idx in nic_override:
6198 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
6199 d8dcf3c9 Guido Trotter
        else:
6200 d8dcf3c9 Guido Trotter
          this_nic_override = {}
6201 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
6202 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
6203 d8dcf3c9 Guido Trotter
        else:
6204 d8dcf3c9 Guido Trotter
          ip = nic.ip
6205 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
6206 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
6207 d8dcf3c9 Guido Trotter
        else:
6208 d8dcf3c9 Guido Trotter
          mac = nic.mac
6209 62f0dd02 Guido Trotter
        if idx in self.nic_pnew:
6210 62f0dd02 Guido Trotter
          nicparams = self.nic_pnew[idx]
6211 62f0dd02 Guido Trotter
        else:
6212 62f0dd02 Guido Trotter
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
6213 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
6214 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
6215 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
6216 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
6217 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
6218 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
6219 62f0dd02 Guido Trotter
        nicparams = self.nic_pnew[constants.DDM_ADD]
6220 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
6221 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
6222 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
6223 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
6224 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
6225 d8dcf3c9 Guido Trotter
6226 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
6227 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6228 a8083063 Iustin Pop
    return env, nl, nl
6229 a8083063 Iustin Pop
6230 0329617a Guido Trotter
  def _GetUpdatedParams(self, old_params, update_dict,
6231 0329617a Guido Trotter
                        default_values, parameter_types):
6232 0329617a Guido Trotter
    """Return the new params dict for the given params.
6233 0329617a Guido Trotter

6234 0329617a Guido Trotter
    @type old_params: dict
6235 f2fd87d7 Iustin Pop
    @param old_params: old parameters
6236 0329617a Guido Trotter
    @type update_dict: dict
6237 f2fd87d7 Iustin Pop
    @param update_dict: dict containing new parameter values,
6238 f2fd87d7 Iustin Pop
                        or constants.VALUE_DEFAULT to reset the
6239 f2fd87d7 Iustin Pop
                        parameter to its default value
6240 0329617a Guido Trotter
    @type default_values: dict
6241 0329617a Guido Trotter
    @param default_values: default values for the filled parameters
6242 0329617a Guido Trotter
    @type parameter_types: dict
6243 0329617a Guido Trotter
    @param parameter_types: dict mapping target dict keys to types
6244 0329617a Guido Trotter
                            in constants.ENFORCEABLE_TYPES
6245 0329617a Guido Trotter
    @rtype: (dict, dict)
6246 0329617a Guido Trotter
    @return: (new_parameters, filled_parameters)
6247 0329617a Guido Trotter

6248 0329617a Guido Trotter
    """
6249 0329617a Guido Trotter
    params_copy = copy.deepcopy(old_params)
6250 0329617a Guido Trotter
    for key, val in update_dict.iteritems():
6251 0329617a Guido Trotter
      if val == constants.VALUE_DEFAULT:
6252 0329617a Guido Trotter
        try:
6253 0329617a Guido Trotter
          del params_copy[key]
6254 0329617a Guido Trotter
        except KeyError:
6255 0329617a Guido Trotter
          pass
6256 0329617a Guido Trotter
      else:
6257 0329617a Guido Trotter
        params_copy[key] = val
6258 0329617a Guido Trotter
    utils.ForceDictType(params_copy, parameter_types)
6259 0329617a Guido Trotter
    params_filled = objects.FillDict(default_values, params_copy)
6260 0329617a Guido Trotter
    return (params_copy, params_filled)
6261 0329617a Guido Trotter
6262 a8083063 Iustin Pop
  def CheckPrereq(self):
6263 a8083063 Iustin Pop
    """Check prerequisites.
6264 a8083063 Iustin Pop

6265 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
6266 a8083063 Iustin Pop

6267 a8083063 Iustin Pop
    """
6268 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
6269 a8083063 Iustin Pop
6270 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
6271 31a853d2 Iustin Pop
6272 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6273 2ee88aeb Guido Trotter
    cluster = self.cluster = self.cfg.GetClusterInfo()
6274 1a5c7281 Guido Trotter
    assert self.instance is not None, \
6275 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6276 6b12959c Iustin Pop
    pnode = instance.primary_node
6277 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
6278 74409b12 Iustin Pop
6279 338e51e8 Iustin Pop
    # hvparams processing
6280 74409b12 Iustin Pop
    if self.op.hvparams:
6281 0329617a Guido Trotter
      i_hvdict, hv_new = self._GetUpdatedParams(
6282 0329617a Guido Trotter
                             instance.hvparams, self.op.hvparams,
6283 0329617a Guido Trotter
                             cluster.hvparams[instance.hypervisor],
6284 0329617a Guido Trotter
                             constants.HVS_PARAMETER_TYPES)
6285 74409b12 Iustin Pop
      # local check
6286 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
6287 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
6288 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
6289 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
6290 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
6291 338e51e8 Iustin Pop
    else:
6292 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
6293 338e51e8 Iustin Pop
6294 338e51e8 Iustin Pop
    # beparams processing
6295 338e51e8 Iustin Pop
    if self.op.beparams:
6296 0329617a Guido Trotter
      i_bedict, be_new = self._GetUpdatedParams(
6297 0329617a Guido Trotter
                             instance.beparams, self.op.beparams,
6298 0329617a Guido Trotter
                             cluster.beparams[constants.PP_DEFAULT],
6299 0329617a Guido Trotter
                             constants.BES_PARAMETER_TYPES)
6300 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
6301 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
6302 338e51e8 Iustin Pop
    else:
6303 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
6304 74409b12 Iustin Pop
6305 cfefe007 Guido Trotter
    self.warn = []
6306 647a5d80 Iustin Pop
6307 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
6308 647a5d80 Iustin Pop
      mem_check_list = [pnode]
6309 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6310 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
6311 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
6312 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
6313 72737a7f Iustin Pop
                                                  instance.hypervisor)
6314 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
6315 72737a7f Iustin Pop
                                         instance.hypervisor)
6316 070e998b Iustin Pop
      pninfo = nodeinfo[pnode]
6317 4c4e4e1e Iustin Pop
      msg = pninfo.fail_msg
6318 070e998b Iustin Pop
      if msg:
6319 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
6320 070e998b Iustin Pop
        self.warn.append("Can't get info from primary node %s: %s" %
6321 070e998b Iustin Pop
                         (pnode, msg))
6322 070e998b Iustin Pop
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
6323 070e998b Iustin Pop
        self.warn.append("Node data from primary node %s doesn't contain"
6324 070e998b Iustin Pop
                         " free memory information" % pnode)
6325 4c4e4e1e Iustin Pop
      elif instance_info.fail_msg:
6326 7ad1af4a Iustin Pop
        self.warn.append("Can't get instance runtime information: %s" %
6327 4c4e4e1e Iustin Pop
                        instance_info.fail_msg)
6328 cfefe007 Guido Trotter
      else:
6329 7ad1af4a Iustin Pop
        if instance_info.payload:
6330 7ad1af4a Iustin Pop
          current_mem = int(instance_info.payload['memory'])
6331 cfefe007 Guido Trotter
        else:
6332 cfefe007 Guido Trotter
          # Assume instance not running
6333 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
6334 cfefe007 Guido Trotter
          # and we have no other way to check)
6335 cfefe007 Guido Trotter
          current_mem = 0
6336 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
6337 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
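        # i.e. how much memory the node is short of: the new memory size,
        # minus what the instance currently uses, minus the node's free
        # memory; a positive value means the instance couldn't be started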
6338 cfefe007 Guido Trotter
        if miss_mem > 0:
6339 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
6340 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
6341 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
6342 cfefe007 Guido Trotter
6343 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6344 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
6345 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
6346 ea33068f Iustin Pop
            continue
6347 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
6348 070e998b Iustin Pop
          if msg:
6349 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
6350 070e998b Iustin Pop
                             (node, msg))
6351 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
6352 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
6353 070e998b Iustin Pop
                             " memory information" % node)
6354 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
6355 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
6356 647a5d80 Iustin Pop
                             " secondary node %s" % node)
6357 5bc84f33 Alexander Schreiber
6358 24991749 Iustin Pop
    # NIC processing
6359 cd098c41 Guido Trotter
    self.nic_pnew = {}
6360 cd098c41 Guido Trotter
    self.nic_pinst = {}
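    # nic_pinst collects the new per-instance (unfilled) nic parameters,
    # nic_pnew the same values filled with the cluster defaults; the
    # filled version is what gets validated and exported to the hooks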
6361 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6362 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6363 24991749 Iustin Pop
        if not instance.nics:
6364 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
6365 24991749 Iustin Pop
        continue
6366 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
6367 24991749 Iustin Pop
        # an existing nic
6368 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
6369 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
6370 24991749 Iustin Pop
                                     " are 0 to %d" %
6371 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
6372 cd098c41 Guido Trotter
        old_nic_params = instance.nics[nic_op].nicparams
6373 cd098c41 Guido Trotter
        old_nic_ip = instance.nics[nic_op].ip
6374 cd098c41 Guido Trotter
      else:
6375 cd098c41 Guido Trotter
        old_nic_params = {}
6376 cd098c41 Guido Trotter
        old_nic_ip = None
6377 cd098c41 Guido Trotter
6378 cd098c41 Guido Trotter
      update_params_dict = dict([(key, nic_dict[key])
6379 cd098c41 Guido Trotter
                                 for key in constants.NICS_PARAMETERS
6380 cd098c41 Guido Trotter
                                 if key in nic_dict])
6381 cd098c41 Guido Trotter
6382 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
6383 cd098c41 Guido Trotter
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
6384 cd098c41 Guido Trotter
6385 cd098c41 Guido Trotter
      new_nic_params, new_filled_nic_params = \
6386 cd098c41 Guido Trotter
          self._GetUpdatedParams(old_nic_params, update_params_dict,
6387 cd098c41 Guido Trotter
                                 cluster.nicparams[constants.PP_DEFAULT],
6388 cd098c41 Guido Trotter
                                 constants.NICS_PARAMETER_TYPES)
6389 cd098c41 Guido Trotter
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
6390 cd098c41 Guido Trotter
      self.nic_pinst[nic_op] = new_nic_params
6391 cd098c41 Guido Trotter
      self.nic_pnew[nic_op] = new_filled_nic_params
6392 cd098c41 Guido Trotter
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
6393 cd098c41 Guido Trotter
6394 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
6395 cd098c41 Guido Trotter
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
6396 4c4e4e1e Iustin Pop
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
6397 35c0c8da Iustin Pop
        if msg:
6398 35c0c8da Iustin Pop
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
6399 24991749 Iustin Pop
          if self.force:
6400 24991749 Iustin Pop
            self.warn.append(msg)
6401 24991749 Iustin Pop
          else:
6402 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
6403 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_ROUTED:
6404 cd098c41 Guido Trotter
        if 'ip' in nic_dict:
6405 cd098c41 Guido Trotter
          nic_ip = nic_dict['ip']
6406 cd098c41 Guido Trotter
        else:
6407 cd098c41 Guido Trotter
          nic_ip = old_nic_ip
6408 cd098c41 Guido Trotter
        if nic_ip is None:
6409 cd098c41 Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic ip to None'
6410 cd098c41 Guido Trotter
                                     ' on a routed nic')
6411 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6412 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6413 5c44da6a Guido Trotter
        if nic_mac is None:
6414 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic mac to None')
6415 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6416 5c44da6a Guido Trotter
          # otherwise generate the mac
6417 5c44da6a Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC()
6418 5c44da6a Guido Trotter
        else:
6419 5c44da6a Guido Trotter
          # or validate/reserve the current one
6420 5c44da6a Guido Trotter
          if self.cfg.IsMacInUse(nic_mac):
6421 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
6422 5c44da6a Guido Trotter
                                       " in cluster" % nic_mac)
6423 24991749 Iustin Pop
6424 24991749 Iustin Pop
    # DISK processing
6425 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
6426 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
6427 24991749 Iustin Pop
                                 " diskless instances")
6428 24991749 Iustin Pop
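    # each entry is (disk_op, disk_dict): disk_op is either DDM_ADD,
    # DDM_REMOVE or the integer index of an existing disk to be changed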
    for disk_op, disk_dict in self.op.disks:
6429 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6430 24991749 Iustin Pop
        if len(instance.disks) == 1:
6431 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
6432 24991749 Iustin Pop
                                     " an instance")
6433 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
6434 24991749 Iustin Pop
        ins_l = ins_l[pnode]
6435 4c4e4e1e Iustin Pop
        msg = ins_l.fail_msg
6436 aca13712 Iustin Pop
        if msg:
6437 aca13712 Iustin Pop
          raise errors.OpPrereqError("Can't contact node %s: %s" %
6438 aca13712 Iustin Pop
                                     (pnode, msg))
6439 aca13712 Iustin Pop
        if instance.name in ins_l.payload:
6440 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
6441 24991749 Iustin Pop
                                     " disks.")
6442 24991749 Iustin Pop
6443 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
6444 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
6445 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
6446 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
6447 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
6448 24991749 Iustin Pop
        # an existing disk
6449 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
6450 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
6451 24991749 Iustin Pop
                                     " are 0 to %d" %
6452 24991749 Iustin Pop
                                     (disk_op, len(instance.disks) - 1))
6453 24991749 Iustin Pop
6454 a8083063 Iustin Pop
    return
6455 a8083063 Iustin Pop
6456 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6457 a8083063 Iustin Pop
    """Modifies an instance.
6458 a8083063 Iustin Pop

6459 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
6460 24991749 Iustin Pop

6461 a8083063 Iustin Pop
    """
6462 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
6463 cfefe007 Guido Trotter
    # feedback_fn there.
6464 cfefe007 Guido Trotter
    for warn in self.warn:
6465 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
6466 cfefe007 Guido Trotter
6467 a8083063 Iustin Pop
    result = []
6468 a8083063 Iustin Pop
    instance = self.instance
6469 cd098c41 Guido Trotter
    cluster = self.cluster
6470 24991749 Iustin Pop
    # disk changes
6471 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6472 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6473 24991749 Iustin Pop
        # remove the last disk
6474 24991749 Iustin Pop
        device = instance.disks.pop()
6475 24991749 Iustin Pop
        device_idx = len(instance.disks)
6476 24991749 Iustin Pop
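        # walk the device tree (the disk and all its children) together with
        # the node each component lives on, removing every piece best-effort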
        for node, disk in device.ComputeNodeTree(instance.primary_node):
6477 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
6478 4c4e4e1e Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
6479 e1bc0878 Iustin Pop
          if msg:
6480 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
6481 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
6482 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
6483 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
6484 24991749 Iustin Pop
        # add a new disk
6485 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
6486 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
6487 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
6488 24991749 Iustin Pop
        else:
6489 24991749 Iustin Pop
          file_driver = file_path = None
6490 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
6491 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
6492 24991749 Iustin Pop
                                         instance.disk_template,
6493 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
6494 24991749 Iustin Pop
                                         instance.secondary_nodes,
6495 24991749 Iustin Pop
                                         [disk_dict],
6496 24991749 Iustin Pop
                                         file_path,
6497 24991749 Iustin Pop
                                         file_driver,
6498 24991749 Iustin Pop
                                         disk_idx_base)[0]
6499 24991749 Iustin Pop
        instance.disks.append(new_disk)
6500 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
6501 24991749 Iustin Pop
6502 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
6503 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
6504 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
6505 24991749 Iustin Pop
        #HARDCODE
6506 428958aa Iustin Pop
        for node in instance.all_nodes:
6507 428958aa Iustin Pop
          f_create = node == instance.primary_node
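          # the same flag is passed twice to _CreateBlockDev, so only the
          # primary node gets the forced creation/opening of the device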
6508 796cab27 Iustin Pop
          try:
6509 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
6510 428958aa Iustin Pop
                            f_create, info, f_create)
6511 1492cca7 Iustin Pop
          except errors.OpExecError, err:
6512 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
6513 428958aa Iustin Pop
                            " node %s: %s",
6514 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
6515 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
6516 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
6517 24991749 Iustin Pop
      else:
6518 24991749 Iustin Pop
        # change a given disk
6519 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
6520 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
6521 24991749 Iustin Pop
    # NIC changes
6522 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6523 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6524 24991749 Iustin Pop
        # remove the last nic
6525 24991749 Iustin Pop
        del instance.nics[-1]
6526 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
6527 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
6528 5c44da6a Guido Trotter
        # mac and bridge should be set, by now
6529 5c44da6a Guido Trotter
        mac = nic_dict['mac']
6530 cd098c41 Guido Trotter
        ip = nic_dict.get('ip', None)
6531 cd098c41 Guido Trotter
        nicparams = self.nic_pinst[constants.DDM_ADD]
6532 cd098c41 Guido Trotter
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
6533 24991749 Iustin Pop
        instance.nics.append(new_nic)
6534 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
6535 cd098c41 Guido Trotter
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
6536 cd098c41 Guido Trotter
                       (new_nic.mac, new_nic.ip,
6537 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
6538 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
6539 cd098c41 Guido Trotter
                       )))
6540 24991749 Iustin Pop
      else:
6541 cd098c41 Guido Trotter
        for key in 'mac', 'ip':
6542 24991749 Iustin Pop
          if key in nic_dict:
6543 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
6544 cd098c41 Guido Trotter
        if nic_op in self.nic_pnew:
6545 cd098c41 Guido Trotter
          instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
6546 cd098c41 Guido Trotter
        for key, val in nic_dict.iteritems():
6547 cd098c41 Guido Trotter
          result.append(("nic.%s/%d" % (key, nic_op), val))
6548 24991749 Iustin Pop
6549 24991749 Iustin Pop
    # hvparams changes
6550 74409b12 Iustin Pop
    if self.op.hvparams:
6551 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
6552 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
6553 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
6554 24991749 Iustin Pop
6555 24991749 Iustin Pop
    # beparams changes
6556 338e51e8 Iustin Pop
    if self.op.beparams:
6557 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
6558 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
6559 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
6560 a8083063 Iustin Pop
6561 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
6562 a8083063 Iustin Pop
6563 a8083063 Iustin Pop
    return result
6564 a8083063 Iustin Pop
6565 a8083063 Iustin Pop
6566 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
6567 a8083063 Iustin Pop
  """Query the exports list
6568 a8083063 Iustin Pop

6569 a8083063 Iustin Pop
  """
6570 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
6571 21a15682 Guido Trotter
  REQ_BGL = False
6572 21a15682 Guido Trotter
6573 21a15682 Guido Trotter
  def ExpandNames(self):
6574 21a15682 Guido Trotter
    self.needed_locks = {}
6575 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
6576 21a15682 Guido Trotter
    if not self.op.nodes:
6577 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6578 21a15682 Guido Trotter
    else:
6579 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
6580 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
6581 a8083063 Iustin Pop
6582 a8083063 Iustin Pop
  def CheckPrereq(self):
6583 21a15682 Guido Trotter
    """Check prerequisites.
6584 a8083063 Iustin Pop

6585 a8083063 Iustin Pop
    """
6586 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
6587 a8083063 Iustin Pop
6588 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6589 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
6590 a8083063 Iustin Pop

6591 e4376078 Iustin Pop
    @rtype: dict
6592 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
6593 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
6594 e4376078 Iustin Pop
        that node.
6595 a8083063 Iustin Pop

6596 a8083063 Iustin Pop
    """
6597 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
6598 b04285f2 Guido Trotter
    result = {}
6599 b04285f2 Guido Trotter
    for node in rpcresult:
6600 4c4e4e1e Iustin Pop
      if rpcresult[node].fail_msg:
6601 b04285f2 Guido Trotter
        result[node] = False
6602 b04285f2 Guido Trotter
      else:
6603 1b7bfbb7 Iustin Pop
        result[node] = rpcresult[node].payload
6604 b04285f2 Guido Trotter
6605 b04285f2 Guido Trotter
    return result
6606 a8083063 Iustin Pop
6607 a8083063 Iustin Pop
6608 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
6609 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
6610 a8083063 Iustin Pop

6611 a8083063 Iustin Pop
  """
6612 a8083063 Iustin Pop
  HPATH = "instance-export"
6613 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6614 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
6615 6657590e Guido Trotter
  REQ_BGL = False
6616 6657590e Guido Trotter
6617 6657590e Guido Trotter
  def ExpandNames(self):
6618 6657590e Guido Trotter
    self._ExpandAndLockInstance()
6619 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
6620 6657590e Guido Trotter
    #
6621 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
6622 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
6623 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
6624 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
6625 6657590e Guido Trotter
    #    then one to remove, after
6626 5bbd3f7f Michael Hanselmann
    #  - removing the removal operation altogether
6627 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6628 6657590e Guido Trotter
6629 6657590e Guido Trotter
  def DeclareLocks(self, level):
6630 6657590e Guido Trotter
    """Last minute lock declaration."""
6631 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
6632 a8083063 Iustin Pop
6633 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6634 a8083063 Iustin Pop
    """Build hooks env.
6635 a8083063 Iustin Pop

6636 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
6637 a8083063 Iustin Pop

6638 a8083063 Iustin Pop
    """
6639 a8083063 Iustin Pop
    env = {
6640 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
6641 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
6642 a8083063 Iustin Pop
      }
6643 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6644 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
6645 a8083063 Iustin Pop
          self.op.target_node]
6646 a8083063 Iustin Pop
    return env, nl, nl
6647 a8083063 Iustin Pop
6648 a8083063 Iustin Pop
  def CheckPrereq(self):
6649 a8083063 Iustin Pop
    """Check prerequisites.
6650 a8083063 Iustin Pop

6651 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
6652 a8083063 Iustin Pop

6653 a8083063 Iustin Pop
    """
6654 6657590e Guido Trotter
    instance_name = self.op.instance_name
6655 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
6656 6657590e Guido Trotter
    assert self.instance is not None, \
6657 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
6658 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
6659 a8083063 Iustin Pop
6660 6657590e Guido Trotter
    self.dst_node = self.cfg.GetNodeInfo(
6661 6657590e Guido Trotter
      self.cfg.ExpandNodeName(self.op.target_node))
6662 a8083063 Iustin Pop
6663 268b8e42 Iustin Pop
    if self.dst_node is None:
6664 268b8e42 Iustin Pop
      # This is wrong node name, not a non-locked node
6665 268b8e42 Iustin Pop
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
6666 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
6667 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
6668 a8083063 Iustin Pop
6669 b6023d6c Manuel Franceschini
    # instance disk type verification
6670 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
6671 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
6672 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
6673 b6023d6c Manuel Franceschini
                                   " file-based disks")
6674 b6023d6c Manuel Franceschini
6675 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6676 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
6677 a8083063 Iustin Pop

6678 a8083063 Iustin Pop
    """
6679 a8083063 Iustin Pop
    instance = self.instance
6680 a8083063 Iustin Pop
    dst_node = self.dst_node
6681 a8083063 Iustin Pop
    src_node = instance.primary_node
6682 a8083063 Iustin Pop
    if self.op.shutdown:
6683 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
6684 781de953 Iustin Pop
      result = self.rpc.call_instance_shutdown(src_node, instance)
6685 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance %s on"
6686 4c4e4e1e Iustin Pop
                   " node %s" % (instance.name, src_node))
6687 a8083063 Iustin Pop
6688 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
6689 a8083063 Iustin Pop
6690 a8083063 Iustin Pop
    snap_disks = []
6691 a8083063 Iustin Pop
6692 998c712c Iustin Pop
    # set the disks ID correctly since call_instance_start needs the
6693 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
6694 998c712c Iustin Pop
    for disk in instance.disks:
6695 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
6696 998c712c Iustin Pop
6697 a8083063 Iustin Pop
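    # snapshot every disk on the source node; a failed snapshot is recorded
    # as False so the export can still continue with the remaining disks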
    try:
6698 a97da6b7 Iustin Pop
      for idx, disk in enumerate(instance.disks):
6699 87812fd3 Iustin Pop
        # result.payload will be a snapshot of an LVM leaf of the disk we passed
6700 87812fd3 Iustin Pop
        result = self.rpc.call_blockdev_snapshot(src_node, disk)
6701 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6702 87812fd3 Iustin Pop
        if msg:
6703 af0413bb Guido Trotter
          self.LogWarning("Could not snapshot disk/%s on node %s: %s",
6704 af0413bb Guido Trotter
                          idx, src_node, msg)
6705 19d7f90a Guido Trotter
          snap_disks.append(False)
6706 19d7f90a Guido Trotter
        else:
6707 87812fd3 Iustin Pop
          disk_id = (vgname, result.payload)
6708 19d7f90a Guido Trotter
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
6709 87812fd3 Iustin Pop
                                 logical_id=disk_id, physical_id=disk_id,
6710 19d7f90a Guido Trotter
                                 iv_name=disk.iv_name)
6711 19d7f90a Guido Trotter
          snap_disks.append(new_dev)
6712 a8083063 Iustin Pop
6713 a8083063 Iustin Pop
    finally:
6714 0d68c45d Iustin Pop
      if self.op.shutdown and instance.admin_up:
6715 0eca8e0c Iustin Pop
        result = self.rpc.call_instance_start(src_node, instance, None, None)
6716 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6717 dd279568 Iustin Pop
        if msg:
6718 b9bddb6b Iustin Pop
          _ShutdownInstanceDisks(self, instance)
6719 dd279568 Iustin Pop
          raise errors.OpExecError("Could not start instance: %s" % msg)
6720 a8083063 Iustin Pop
6721 a8083063 Iustin Pop
    # TODO: check for size
6722 a8083063 Iustin Pop
6723 62c9ec92 Iustin Pop
    cluster_name = self.cfg.GetClusterName()
6724 74c47259 Iustin Pop
    for idx, dev in enumerate(snap_disks):
6725 19d7f90a Guido Trotter
      if dev:
6726 781de953 Iustin Pop
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
6727 781de953 Iustin Pop
                                               instance, cluster_name, idx)
6728 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6729 ba55d062 Iustin Pop
        if msg:
6730 af0413bb Guido Trotter
          self.LogWarning("Could not export disk/%s from node %s to"
6731 af0413bb Guido Trotter
                          " node %s: %s", idx, src_node, dst_node.name, msg)
6732 4c4e4e1e Iustin Pop
        msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
6733 e1bc0878 Iustin Pop
        if msg:
6734 a97da6b7 Iustin Pop
          self.LogWarning("Could not remove snapshot for disk/%d from node"
6735 a97da6b7 Iustin Pop
                          " %s: %s", idx, src_node, msg)
6736 a8083063 Iustin Pop
6737 781de953 Iustin Pop
    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
6738 4c4e4e1e Iustin Pop
    msg = result.fail_msg
6739 9b201a0d Iustin Pop
    if msg:
6740 9b201a0d Iustin Pop
      self.LogWarning("Could not finalize export for instance %s"
6741 9b201a0d Iustin Pop
                      " on node %s: %s", instance.name, dst_node.name, msg)
6742 a8083063 Iustin Pop
6743 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
6744 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
6745 a8083063 Iustin Pop
6746 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
6747 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
6748 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
6749 35fbcd11 Iustin Pop
    iname = instance.name
6750 a8083063 Iustin Pop
    if nodelist:
6751 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
6752 a8083063 Iustin Pop
      for node in exportlist:
6753 4c4e4e1e Iustin Pop
        if exportlist[node].fail_msg:
6754 781de953 Iustin Pop
          continue
6755 35fbcd11 Iustin Pop
        if iname in exportlist[node].payload:
6756 4c4e4e1e Iustin Pop
          msg = self.rpc.call_export_remove(node, iname).fail_msg
6757 35fbcd11 Iustin Pop
          if msg:
6758 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
6759 35fbcd11 Iustin Pop
                            " on node %s: %s", iname, node, msg)
6760 5c947f38 Iustin Pop
6761 5c947f38 Iustin Pop
6762 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
6763 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
6764 9ac99fda Guido Trotter

6765 9ac99fda Guido Trotter
  """
6766 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
6767 3656b3af Guido Trotter
  REQ_BGL = False
6768 3656b3af Guido Trotter
6769 3656b3af Guido Trotter
  def ExpandNames(self):
6770 3656b3af Guido Trotter
    self.needed_locks = {}
6771 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
6772 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
6773 3656b3af Guido Trotter
    # we can remove exports also for a removed instance)
6774 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6775 9ac99fda Guido Trotter
6776 9ac99fda Guido Trotter
  def CheckPrereq(self):
6777 9ac99fda Guido Trotter
    """Check prerequisites.
6778 9ac99fda Guido Trotter
    """
6779 9ac99fda Guido Trotter
    pass
6780 9ac99fda Guido Trotter
6781 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
6782 9ac99fda Guido Trotter
    """Remove any export.
6783 9ac99fda Guido Trotter

6784 9ac99fda Guido Trotter
    """
6785 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
6786 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
6787 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
6788 9ac99fda Guido Trotter
    fqdn_warn = False
6789 9ac99fda Guido Trotter
    if not instance_name:
6790 9ac99fda Guido Trotter
      fqdn_warn = True
6791 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
6792 9ac99fda Guido Trotter
6793 1b7bfbb7 Iustin Pop
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
6794 1b7bfbb7 Iustin Pop
    exportlist = self.rpc.call_export_list(locked_nodes)
6795 9ac99fda Guido Trotter
    found = False
6796 9ac99fda Guido Trotter
    for node in exportlist:
6797 4c4e4e1e Iustin Pop
      msg = exportlist[node].fail_msg
6798 1b7bfbb7 Iustin Pop
      if msg:
6799 1b7bfbb7 Iustin Pop
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
6800 781de953 Iustin Pop
        continue
6801 1b7bfbb7 Iustin Pop
      if instance_name in exportlist[node].payload:
6802 9ac99fda Guido Trotter
        found = True
6803 781de953 Iustin Pop
        result = self.rpc.call_export_remove(node, instance_name)
6804 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6805 35fbcd11 Iustin Pop
        if msg:
6806 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
6807 35fbcd11 Iustin Pop
                        " on node %s: %s", instance_name, node, msg)
6808 9ac99fda Guido Trotter
6809 9ac99fda Guido Trotter
    if fqdn_warn and not found:
6810 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
6811 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
6812 9ac99fda Guido Trotter
                  " Domain Name.")
6813 9ac99fda Guido Trotter
6814 9ac99fda Guido Trotter
6815 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
6816 5c947f38 Iustin Pop
  """Generic tags LU.
6817 5c947f38 Iustin Pop

6818 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
6819 5c947f38 Iustin Pop

6820 5c947f38 Iustin Pop
  """
6821 5c947f38 Iustin Pop
6822 8646adce Guido Trotter
  def ExpandNames(self):
6823 8646adce Guido Trotter
    self.needed_locks = {}
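    # cluster tags need no extra locks; node and instance tags lock the
    # respective object below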
6824 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
6825 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
6826 5c947f38 Iustin Pop
      if name is None:
6827 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
6828 3ecf6786 Iustin Pop
                                   (self.op.name,))
6829 5c947f38 Iustin Pop
      self.op.name = name
6830 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = name
6831 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
6832 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
6833 5c947f38 Iustin Pop
      if name is None:
6834 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
6835 3ecf6786 Iustin Pop
                                   (self.op.name,))
6836 5c947f38 Iustin Pop
      self.op.name = name
6837 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = name
6838 8646adce Guido Trotter
6839 8646adce Guido Trotter
  def CheckPrereq(self):
6840 8646adce Guido Trotter
    """Check prerequisites.
6841 8646adce Guido Trotter

6842 8646adce Guido Trotter
    """
6843 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
6844 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
6845 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
6846 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
6847 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
6848 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
6849 5c947f38 Iustin Pop
    else:
6850 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
6851 3ecf6786 Iustin Pop
                                 str(self.op.kind))
6852 5c947f38 Iustin Pop
6853 5c947f38 Iustin Pop
6854 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
6855 5c947f38 Iustin Pop
  """Returns the tags of a given object.
6856 5c947f38 Iustin Pop

6857 5c947f38 Iustin Pop
  """
6858 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
6859 8646adce Guido Trotter
  REQ_BGL = False
6860 5c947f38 Iustin Pop
6861 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6862 5c947f38 Iustin Pop
    """Returns the tag list.
6863 5c947f38 Iustin Pop

6864 5c947f38 Iustin Pop
    """
6865 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
6866 5c947f38 Iustin Pop
6867 5c947f38 Iustin Pop
6868 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
6869 73415719 Iustin Pop
  """Searches the tags for a given pattern.
6870 73415719 Iustin Pop

6871 73415719 Iustin Pop
  """
6872 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
6873 8646adce Guido Trotter
  REQ_BGL = False
6874 8646adce Guido Trotter
6875 8646adce Guido Trotter
  def ExpandNames(self):
6876 8646adce Guido Trotter
    self.needed_locks = {}
6877 73415719 Iustin Pop
6878 73415719 Iustin Pop
  def CheckPrereq(self):
6879 73415719 Iustin Pop
    """Check prerequisites.
6880 73415719 Iustin Pop

6881 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
6882 73415719 Iustin Pop

6883 73415719 Iustin Pop
    """
6884 73415719 Iustin Pop
    try:
6885 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
6886 73415719 Iustin Pop
    except re.error, err:
6887 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6888 73415719 Iustin Pop
                                 (self.op.pattern, err))
6889 73415719 Iustin Pop
6890 73415719 Iustin Pop
  def Exec(self, feedback_fn):
6891 73415719 Iustin Pop
    """Returns the tag list.
6892 73415719 Iustin Pop

6893 73415719 Iustin Pop
    """
6894 73415719 Iustin Pop
    cfg = self.cfg
6895 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
6896 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
6897 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6898 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
6899 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6900 73415719 Iustin Pop
    results = []
6901 73415719 Iustin Pop
    for path, target in tgts:
6902 73415719 Iustin Pop
      for tag in target.GetTags():
6903 73415719 Iustin Pop
        if self.re.search(tag):
6904 73415719 Iustin Pop
          results.append((path, tag))
6905 73415719 Iustin Pop
    return results
6906 73415719 Iustin Pop
6907 73415719 Iustin Pop
6908 f27302fa Iustin Pop
class LUAddTags(TagsLU):
6909 5c947f38 Iustin Pop
  """Sets a tag on a given object.
6910 5c947f38 Iustin Pop

6911 5c947f38 Iustin Pop
  """
6912 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6913 8646adce Guido Trotter
  REQ_BGL = False
6914 5c947f38 Iustin Pop
6915 5c947f38 Iustin Pop
  def CheckPrereq(self):
6916 5c947f38 Iustin Pop
    """Check prerequisites.
6917 5c947f38 Iustin Pop

6918 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
6919 5c947f38 Iustin Pop

6920 5c947f38 Iustin Pop
    """
6921 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6922 f27302fa Iustin Pop
    for tag in self.op.tags:
6923 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6924 5c947f38 Iustin Pop
6925 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6926 5c947f38 Iustin Pop
    """Sets the tag.
6927 5c947f38 Iustin Pop

6928 5c947f38 Iustin Pop
    """
6929 5c947f38 Iustin Pop
    try:
6930 f27302fa Iustin Pop
      for tag in self.op.tags:
6931 f27302fa Iustin Pop
        self.target.AddTag(tag)
6932 5c947f38 Iustin Pop
    except errors.TagError, err:
6933 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6934 5c947f38 Iustin Pop
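    # writing the modified object back can race with other configuration
    # updates; report that as a retryable error rather than failing hard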
    try:
6935 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6936 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6937 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6938 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6939 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6940 5c947f38 Iustin Pop
6941 5c947f38 Iustin Pop
6942 f27302fa Iustin Pop
class LUDelTags(TagsLU):
6943 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
6944 5c947f38 Iustin Pop

6945 5c947f38 Iustin Pop
  """
6946 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6947 8646adce Guido Trotter
  REQ_BGL = False
6948 5c947f38 Iustin Pop
6949 5c947f38 Iustin Pop
  def CheckPrereq(self):
6950 5c947f38 Iustin Pop
    """Check prerequisites.
6951 5c947f38 Iustin Pop

6952 5c947f38 Iustin Pop
    This checks that we have the given tag.
6953 5c947f38 Iustin Pop

6954 5c947f38 Iustin Pop
    """
6955 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6956 f27302fa Iustin Pop
    for tag in self.op.tags:
6957 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6958 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
6959 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
6960 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
6961 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
6962 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
6963 f27302fa Iustin Pop
      diff_names.sort()
6964 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
6965 f27302fa Iustin Pop
                                 (",".join(diff_names)))
6966 5c947f38 Iustin Pop
6967 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6968 5c947f38 Iustin Pop
    """Remove the tag from the object.
6969 5c947f38 Iustin Pop

6970 5c947f38 Iustin Pop
    """
6971 f27302fa Iustin Pop
    for tag in self.op.tags:
6972 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
6973 5c947f38 Iustin Pop
    try:
6974 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6975 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6976 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6977 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6978 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6979 06009e27 Iustin Pop
6980 0eed6e61 Guido Trotter
6981 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
6982 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
6983 06009e27 Iustin Pop

6984 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
6985 06009e27 Iustin Pop
  time.
6986 06009e27 Iustin Pop

6987 06009e27 Iustin Pop
  """
6988 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
6989 fbe9022f Guido Trotter
  REQ_BGL = False
6990 06009e27 Iustin Pop
6991 fbe9022f Guido Trotter
  def ExpandNames(self):
6992 fbe9022f Guido Trotter
    """Expand names and set required locks.
6993 06009e27 Iustin Pop

6994 fbe9022f Guido Trotter
    This expands the node list, if any.
6995 06009e27 Iustin Pop

6996 06009e27 Iustin Pop
    """
6997 fbe9022f Guido Trotter
    self.needed_locks = {}
6998 06009e27 Iustin Pop
    if self.op.on_nodes:
6999 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
7000 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
7001 fbe9022f Guido Trotter
      # more information.
7002 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
7003 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
7004 fbe9022f Guido Trotter
7005 fbe9022f Guido Trotter
  def CheckPrereq(self):
7006 fbe9022f Guido Trotter
    """Check prerequisites.
7007 fbe9022f Guido Trotter

7008 fbe9022f Guido Trotter
    """
7009 06009e27 Iustin Pop
7010 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
7011 06009e27 Iustin Pop
    """Do the actual sleep.
7012 06009e27 Iustin Pop

7013 06009e27 Iustin Pop
    """
7014 06009e27 Iustin Pop
    if self.op.on_master:
7015 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
7016 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
7017 06009e27 Iustin Pop
    if self.op.on_nodes:
7018 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
7019 06009e27 Iustin Pop
      for node, node_result in result.items():
7020 4c4e4e1e Iustin Pop
        node_result.Raise("Failure during rpc call to node %s" % node)
7021 d61df03e Iustin Pop
7022 d61df03e Iustin Pop
7023 d1c2dd75 Iustin Pop
class IAllocator(object):
7024 d1c2dd75 Iustin Pop
  """IAllocator framework.
7025 d61df03e Iustin Pop

7026 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
7027 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
7028 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
7029 d1c2dd75 Iustin Pop
    - four buffer attributes ((in|out)_(data|text)), which represent the
7030 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
7031 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
7032 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
7033 d1c2dd75 Iustin Pop
      easy usage
7034 d61df03e Iustin Pop

7035 d61df03e Iustin Pop
  """
7036 29859cb7 Iustin Pop
  _ALLO_KEYS = [
7037 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
7038 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
7039 d1c2dd75 Iustin Pop
    ]
7040 29859cb7 Iustin Pop
  _RELO_KEYS = [
7041 29859cb7 Iustin Pop
    "relocate_from",
7042 29859cb7 Iustin Pop
    ]
7043 d1c2dd75 Iustin Pop
7044 923ddac0 Michael Hanselmann
  def __init__(self, cfg, rpc, mode, name, **kwargs):
7045 923ddac0 Michael Hanselmann
    self.cfg = cfg
7046 923ddac0 Michael Hanselmann
    self.rpc = rpc
7047 d1c2dd75 Iustin Pop
    # init buffer variables
7048 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
7049 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
7050 29859cb7 Iustin Pop
    self.mode = mode
7051 29859cb7 Iustin Pop
    self.name = name
7052 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
7053 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
7054 a0add446 Iustin Pop
    self.hypervisor = None
7055 29859cb7 Iustin Pop
    self.relocate_from = None
7056 27579978 Iustin Pop
    # computed fields
7057 27579978 Iustin Pop
    self.required_nodes = None
7058 d1c2dd75 Iustin Pop
    # init result fields
7059 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
7060 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
7061 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
7062 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
7063 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
7064 29859cb7 Iustin Pop
    else:
7065 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
7066 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
7067 d1c2dd75 Iustin Pop
    for key in kwargs:
7068 29859cb7 Iustin Pop
      if key not in keyset:
7069 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
7070 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
7071 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
7072 29859cb7 Iustin Pop
    for key in keyset:
7073 d1c2dd75 Iustin Pop
      if key not in kwargs:
7074 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
7075 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
7076 d1c2dd75 Iustin Pop
    self._BuildInputData()
7077 d1c2dd75 Iustin Pop
7078 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
7079 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
7080 d1c2dd75 Iustin Pop

7081 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
7082 d1c2dd75 Iustin Pop

7083 d1c2dd75 Iustin Pop
    """
7084 923ddac0 Michael Hanselmann
    cfg = self.cfg
7085 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
7086 d1c2dd75 Iustin Pop
    # cluster data
7087 d1c2dd75 Iustin Pop
    data = {
7088 77031881 Iustin Pop
      "version": constants.IALLOCATOR_VERSION,
7089 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
7090 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
7091 1325da74 Iustin Pop
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
7092 d1c2dd75 Iustin Pop
      # we don't have job IDs
7093 d61df03e Iustin Pop
      }
7094 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
7095 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
7096 6286519f Iustin Pop
7097 d1c2dd75 Iustin Pop
    # node data
7098 d1c2dd75 Iustin Pop
    node_results = {}
7099 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
7100 8cc7e742 Guido Trotter
7101 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
7102 a0add446 Iustin Pop
      hypervisor_name = self.hypervisor
7103 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
7104 a0add446 Iustin Pop
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
7105 8cc7e742 Guido Trotter
7106 923ddac0 Michael Hanselmann
    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
7107 923ddac0 Michael Hanselmann
                                        hypervisor_name)
7108 923ddac0 Michael Hanselmann
    node_iinfo = \
7109 923ddac0 Michael Hanselmann
      self.rpc.call_all_instances_info(node_list,
7110 923ddac0 Michael Hanselmann
                                       cluster_info.enabled_hypervisors)
7111 1325da74 Iustin Pop
    for nname, nresult in node_data.items():
7112 1325da74 Iustin Pop
      # first fill in static (config-based) values
7113 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
7114 d1c2dd75 Iustin Pop
      pnr = {
7115 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
7116 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
7117 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
7118 fc0fe88c Iustin Pop
        "offline": ninfo.offline,
7119 0b2454b9 Iustin Pop
        "drained": ninfo.drained,
7120 1325da74 Iustin Pop
        "master_candidate": ninfo.master_candidate,
7121 d1c2dd75 Iustin Pop
        }
7122 1325da74 Iustin Pop
7123 1325da74 Iustin Pop
      if not ninfo.offline:
7124 4c4e4e1e Iustin Pop
        nresult.Raise("Can't get data for node %s" % nname)
7125 4c4e4e1e Iustin Pop
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
7126 4c4e4e1e Iustin Pop
                                nname)
7127 070e998b Iustin Pop
        remote_info = nresult.payload
7128 1325da74 Iustin Pop
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
7129 1325da74 Iustin Pop
                     'vg_size', 'vg_free', 'cpu_total']:
7130 1325da74 Iustin Pop
          if attr not in remote_info:
7131 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' didn't return attribute"
7132 1325da74 Iustin Pop
                                     " '%s'" % (nname, attr))
7133 070e998b Iustin Pop
          if not isinstance(remote_info[attr], int):
7134 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' returned invalid value"
7135 070e998b Iustin Pop
                                     " for '%s': %s" %
7136 070e998b Iustin Pop
                                     (nname, attr, remote_info[attr]))
7137 1325da74 Iustin Pop
        # compute memory used by primary instances
7138 1325da74 Iustin Pop
        i_p_mem = i_p_up_mem = 0
7139 1325da74 Iustin Pop
        for iinfo, beinfo in i_list:
7140 1325da74 Iustin Pop
          if iinfo.primary_node == nname:
7141 1325da74 Iustin Pop
            i_p_mem += beinfo[constants.BE_MEMORY]
7142 2fa74ef4 Iustin Pop
            if iinfo.name not in node_iinfo[nname].payload:
7143 1325da74 Iustin Pop
              i_used_mem = 0
7144 1325da74 Iustin Pop
            else:
7145 2fa74ef4 Iustin Pop
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
7146 1325da74 Iustin Pop
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
7147 1325da74 Iustin Pop
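            # memory configured for the instance but not currently used by it
            # still counts as reserved, so subtract it from the free memory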
            remote_info['memory_free'] -= max(0, i_mem_diff)
7148 1325da74 Iustin Pop
7149 1325da74 Iustin Pop
            if iinfo.admin_up:
7150 1325da74 Iustin Pop
              i_p_up_mem += beinfo[constants.BE_MEMORY]
7151 1325da74 Iustin Pop
7152 1325da74 Iustin Pop
        # compute memory used by instances
7153 1325da74 Iustin Pop
        pnr_dyn = {
7154 1325da74 Iustin Pop
          "total_memory": remote_info['memory_total'],
7155 1325da74 Iustin Pop
          "reserved_memory": remote_info['memory_dom0'],
7156 1325da74 Iustin Pop
          "free_memory": remote_info['memory_free'],
7157 1325da74 Iustin Pop
          "total_disk": remote_info['vg_size'],
7158 1325da74 Iustin Pop
          "free_disk": remote_info['vg_free'],
7159 1325da74 Iustin Pop
          "total_cpus": remote_info['cpu_total'],
7160 1325da74 Iustin Pop
          "i_pri_memory": i_p_mem,
7161 1325da74 Iustin Pop
          "i_pri_up_memory": i_p_up_mem,
7162 1325da74 Iustin Pop
          }
7163 1325da74 Iustin Pop
        pnr.update(pnr_dyn)
7164 1325da74 Iustin Pop
7165 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
7166 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
7167 d1c2dd75 Iustin Pop
7168 d1c2dd75 Iustin Pop
    # instance data
7169 d1c2dd75 Iustin Pop
    instance_data = {}
7170 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
7171 a9fe7e8f Guido Trotter
      nic_data = []
7172 a9fe7e8f Guido Trotter
      for nic in iinfo.nics:
7173 a9fe7e8f Guido Trotter
        filled_params = objects.FillDict(
7174 a9fe7e8f Guido Trotter
            cluster_info.nicparams[constants.PP_DEFAULT],
7175 a9fe7e8f Guido Trotter
            nic.nicparams)
7176 a9fe7e8f Guido Trotter
        nic_dict = {"mac": nic.mac,
7177 a9fe7e8f Guido Trotter
                    "ip": nic.ip,
7178 a9fe7e8f Guido Trotter
                    "mode": filled_params[constants.NIC_MODE],
7179 a9fe7e8f Guido Trotter
                    "link": filled_params[constants.NIC_LINK],
7180 a9fe7e8f Guido Trotter
                   }
7181 a9fe7e8f Guido Trotter
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
7182 a9fe7e8f Guido Trotter
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
7183 a9fe7e8f Guido Trotter
        nic_data.append(nic_dict)
7184 d1c2dd75 Iustin Pop
      pir = {
7185 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
7186 1325da74 Iustin Pop
        "admin_up": iinfo.admin_up,
7187 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
7188 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
7189 d1c2dd75 Iustin Pop
        "os": iinfo.os,
7190 1325da74 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
7191 d1c2dd75 Iustin Pop
        "nics": nic_data,
7192 1325da74 Iustin Pop
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
7193 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
7194 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
7195 d1c2dd75 Iustin Pop
        }
7196 88ae4f85 Iustin Pop
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
7197 88ae4f85 Iustin Pop
                                                 pir["disks"])
7198 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
7199 d61df03e Iustin Pop
7200 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
7201 d61df03e Iustin Pop
7202 d1c2dd75 Iustin Pop
    self.in_data = data
7203 d61df03e Iustin Pop
7204 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
7205 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
7206 d61df03e Iustin Pop

7207 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
7208 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
7209 d61df03e Iustin Pop

7210 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
7211 d1c2dd75 Iustin Pop
    done.
7212 d61df03e Iustin Pop

7213 d1c2dd75 Iustin Pop
    """
7214 d1c2dd75 Iustin Pop
    data = self.in_data
7215 d1c2dd75 Iustin Pop
7216 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
7217 d1c2dd75 Iustin Pop
7218 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
7219 27579978 Iustin Pop
      self.required_nodes = 2
7220 27579978 Iustin Pop
    else:
7221 27579978 Iustin Pop
      self.required_nodes = 1
7222 d1c2dd75 Iustin Pop
    request = {
7223 d1c2dd75 Iustin Pop
      "type": "allocate",
7224 d1c2dd75 Iustin Pop
      "name": self.name,
7225 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
7226 d1c2dd75 Iustin Pop
      "tags": self.tags,
7227 d1c2dd75 Iustin Pop
      "os": self.os,
7228 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
7229 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
7230 d1c2dd75 Iustin Pop
      "disks": self.disks,
7231 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
7232 d1c2dd75 Iustin Pop
      "nics": self.nics,
7233 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
7234 d1c2dd75 Iustin Pop
      }
7235 d1c2dd75 Iustin Pop
    data["request"] = request
7236 298fe380 Iustin Pop
7237 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
7238 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
7239 298fe380 Iustin Pop

7240 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
7241 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
7242 d61df03e Iustin Pop

7243 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
7244 d1c2dd75 Iustin Pop
    done.
7245 d61df03e Iustin Pop

7246 d1c2dd75 Iustin Pop
    """
7247 923ddac0 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(self.name)
7248 27579978 Iustin Pop
    if instance is None:
7249 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
7250 27579978 Iustin Pop
                                   " IAllocator" % self.name)
7251 27579978 Iustin Pop
7252 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
7253 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
7254 27579978 Iustin Pop
7255 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
7256 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node")
7257 2a139bb0 Iustin Pop
7258 27579978 Iustin Pop
    self.required_nodes = 1
7259 dafc7302 Guido Trotter
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
7260 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
7261 27579978 Iustin Pop
7262 d1c2dd75 Iustin Pop
    request = {
7263 2a139bb0 Iustin Pop
      "type": "relocate",
7264 d1c2dd75 Iustin Pop
      "name": self.name,
7265 27579978 Iustin Pop
      "disk_space_total": disk_space,
7266 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
7267 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
7268 d1c2dd75 Iustin Pop
      }
7269 27579978 Iustin Pop
    self.in_data["request"] = request
7270 d61df03e Iustin Pop
7271 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
7272 d1c2dd75 Iustin Pop
    """Build input data structures.
7273 d61df03e Iustin Pop

7274 d1c2dd75 Iustin Pop
    """
7275 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
7276 d61df03e Iustin Pop
7277 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
7278 d1c2dd75 Iustin Pop
      self._AddNewInstance()
7279 d1c2dd75 Iustin Pop
    else:
7280 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
7281 d61df03e Iustin Pop
7282 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
7283 d61df03e Iustin Pop
7284 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
7285 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
7286 298fe380 Iustin Pop

7287 d1c2dd75 Iustin Pop
    """
7288 72737a7f Iustin Pop
    if call_fn is None:
7289 923ddac0 Michael Hanselmann
      call_fn = self.rpc.call_iallocator_runner
7290 298fe380 Iustin Pop
7291 923ddac0 Michael Hanselmann
    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
7292 4c4e4e1e Iustin Pop
    result.Raise("Failure while running the iallocator script")
7293 8d528b7c Iustin Pop
7294 87f5c298 Iustin Pop
    self.out_text = result.payload
7295 d1c2dd75 Iustin Pop
    if validate:
7296 d1c2dd75 Iustin Pop
      self._ValidateResult()
7297 298fe380 Iustin Pop
7298 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
7299 d1c2dd75 Iustin Pop
    """Process the allocator results.
7300 538475ca Iustin Pop

7301 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
7302 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
7303 538475ca Iustin Pop

7304 d1c2dd75 Iustin Pop
    """
7305 d1c2dd75 Iustin Pop
    try:
7306 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
7307 d1c2dd75 Iustin Pop
    except Exception, err:
7308 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
7309 d1c2dd75 Iustin Pop
7310 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
7311 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
7312 538475ca Iustin Pop
7313 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
7314 d1c2dd75 Iustin Pop
      if key not in rdict:
7315 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
7316 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
7317 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
7318 538475ca Iustin Pop
7319 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
7320 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
7321 d1c2dd75 Iustin Pop
                               " is not a list")
7322 d1c2dd75 Iustin Pop
    self.out_data = rdict
7323 538475ca Iustin Pop
7324 538475ca Iustin Pop
7325 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
7326 d61df03e Iustin Pop
  """Run allocator tests.
7327 d61df03e Iustin Pop

7328 d61df03e Iustin Pop
  This LU runs the allocator tests
7329 d61df03e Iustin Pop

7330 d61df03e Iustin Pop
  """
7331 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
7332 d61df03e Iustin Pop
7333 d61df03e Iustin Pop
  def CheckPrereq(self):
7334 d61df03e Iustin Pop
    """Check prerequisites.
7335 d61df03e Iustin Pop

7336 d61df03e Iustin Pop
    This checks the opcode parameters depending on the direction and mode.
7337 d61df03e Iustin Pop

7338 d61df03e Iustin Pop
    """
7339 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)
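    # For reference, a parameter set that satisfies the allocation-mode
    # checks above could look like this (hypothetical example values):
    #
    #   nics:  [{"mac": "auto", "ip": None, "bridge": "xen-br0"}]
    #   disks: [{"size": 1024, "mode": "w"}]
    #
    # together with name, mem_size, disk_template, os, tags and vcpus.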

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # build a full allocation request from the opcode parameters
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      # relocation request; the source nodes were computed in CheckPrereq
      # from the instance's secondary nodes
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      # only return the request that would be sent to the allocator
      result = ial.in_text
    else:
      # actually run the named allocator and return its raw reply
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result
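    # Direction semantics, as implemented above: IALLOCATOR_DIR_IN only
    # returns the generated request text without executing anything, while
    # IALLOCATOR_DIR_OUT runs the chosen allocator and returns its raw,
    # unvalidated reply.  A hypothetical caller-side sketch (the opcode and
    # client helper names are assumptions, not taken from this module):
    #
    #   op = opcodes.OpTestAllocator(direction=constants.IALLOCATOR_DIR_IN,
    #                                mode=constants.IALLOCATOR_MODE_RELOC,
    #                                name="instance1.example.com")
    #   request_text = cli.SubmitOpCode(op)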