Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 31821208

History | View | Annotate | Download (252.8 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import time
29 a8083063 Iustin Pop
import re
30 a8083063 Iustin Pop
import platform
31 ffa1c0dc Iustin Pop
import logging
32 74409b12 Iustin Pop
import copy
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import ssh
35 a8083063 Iustin Pop
from ganeti import utils
36 a8083063 Iustin Pop
from ganeti import errors
37 a8083063 Iustin Pop
from ganeti import hypervisor
38 6048c986 Guido Trotter
from ganeti import locking
39 a8083063 Iustin Pop
from ganeti import constants
40 a8083063 Iustin Pop
from ganeti import objects
41 8d14b30d Iustin Pop
from ganeti import serializer
42 112f18a5 Iustin Pop
from ganeti import ssconf
43 d61df03e Iustin Pop
44 d61df03e Iustin Pop
45 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  # Hooks path for this LU; None means BuildHooksEnv is never called
  HPATH = None
  # Hooks type (e.g. node/instance/cluster); used by the hooks runner
  HTYPE = None
  # Opcode attributes that must be present (non-None); checked in __init__
  _OP_REQP = []
  # Whether this LU needs the Big Ganeti Lock held exclusively
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    @param processor: the mcpu processor driving this LU; also provides
        the LogWarning/LogInfo callbacks bound below
    @param op: the opcode this LU executes
    @param context: the Ganeti context object (provides cfg among others)
    @param rpc: the RPC runner used for inter-node calls

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # default: acquire all declared locks exclusively (0 = not shared)
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # lazily-created SshRunner; accessed via the 'ssh' property below
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    # validate the required opcode parameters declared by the subclass
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    Created on first use and cached for the LU's lifetime.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  # read-only property; shadows the module-level 'ssh' import on instances
  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separate is better because:

      - ExpandNames is left as as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possible
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, ecc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    @param feedback_fn: callable used to report progress back to the caller

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    @raise errors.OpPrereqError: if the instance name does not expand to a
        known instance

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      # guard against double declaration of the instance-level lock
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    If should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # mark the recalculation as consumed, so a second call without a new
    # declaration triggers the assertion above
    del self.recalculate_locks[locking.LEVEL_NODE]
324 a8083063 Iustin Pop
325 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
326 a8083063 Iustin Pop
  """Simple LU which runs no hooks.
327 a8083063 Iustin Pop

328 a8083063 Iustin Pop
  This LU is intended as a parent for other LogicalUnits which will
329 a8083063 Iustin Pop
  run no hooks, in order to reduce duplicate code.
330 a8083063 Iustin Pop

331 a8083063 Iustin Pop
  """
332 a8083063 Iustin Pop
  HPATH = None
333 a8083063 Iustin Pop
  HTYPE = None
334 a8083063 Iustin Pop
335 a8083063 Iustin Pop
336 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
337 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded node names.
338 83120a01 Michael Hanselmann

339 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
340 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
341 e4376078 Iustin Pop
  @type nodes: list
342 e4376078 Iustin Pop
  @param nodes: list of node names or None for all nodes
343 e4376078 Iustin Pop
  @rtype: list
344 e4376078 Iustin Pop
  @return: the list of nodes, sorted
345 e4376078 Iustin Pop
  @raise errors.OpProgrammerError: if the nodes parameter is wrong type
346 83120a01 Michael Hanselmann

347 83120a01 Michael Hanselmann
  """
348 3312b702 Iustin Pop
  if not isinstance(nodes, list):
349 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'nodes'")
350 dcb93971 Michael Hanselmann
351 ea47808a Guido Trotter
  if not nodes:
352 ea47808a Guido Trotter
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
353 ea47808a Guido Trotter
      " non-empty list of nodes whose name is to be expanded.")
354 dcb93971 Michael Hanselmann
355 ea47808a Guido Trotter
  wanted = []
356 ea47808a Guido Trotter
  for name in nodes:
357 ea47808a Guido Trotter
    node = lu.cfg.ExpandNodeName(name)
358 ea47808a Guido Trotter
    if node is None:
359 ea47808a Guido Trotter
      raise errors.OpPrereqError("No such node name '%s'" % name)
360 ea47808a Guido Trotter
    wanted.append(node)
361 dcb93971 Michael Hanselmann
362 a7ba5e53 Iustin Pop
  return utils.NiceSort(wanted)
363 3312b702 Iustin Pop
364 3312b702 Iustin Pop
365 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  # An empty/None list means "all instances", returned nicely sorted;
  # an explicit list is expanded but kept in the caller's order.
  if not instances:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for name in instances:
    expanded = lu.cfg.ExpandInstanceName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    wanted.append(expanded)
  return wanted
393 dcb93971 Michael Hanselmann
394 dcb93971 Michael Hanselmann
395 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @type selected: list
  @param selected: the list of output field names requested by the caller
  @raise errors.OpPrereqError: if any selected field matches neither the
      static nor the dynamic field set

  """
  # build the union of both field sets and diff the selection against it
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))
412 dcb93971 Michael Hanselmann
413 dcb93971 Michael Hanselmann
414 a5961235 Iustin Pop
def _CheckBooleanOpField(op, name):
415 a5961235 Iustin Pop
  """Validates boolean opcode parameters.
416 a5961235 Iustin Pop

417 a5961235 Iustin Pop
  This will ensure that an opcode parameter is either a boolean value,
418 a5961235 Iustin Pop
  or None (but that it always exists).
419 a5961235 Iustin Pop

420 a5961235 Iustin Pop
  """
421 a5961235 Iustin Pop
  val = getattr(op, name, None)
422 a5961235 Iustin Pop
  if not (val is None or isinstance(val, bool)):
423 a5961235 Iustin Pop
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
424 a5961235 Iustin Pop
                               (name, str(val)))
425 a5961235 Iustin Pop
  setattr(op, name, val)
426 a5961235 Iustin Pop
427 a5961235 Iustin Pop
428 a5961235 Iustin Pop
def _CheckNodeOnline(lu, node):
429 a5961235 Iustin Pop
  """Ensure that a given node is online.
430 a5961235 Iustin Pop

431 a5961235 Iustin Pop
  @param lu: the LU on behalf of which we make the check
432 a5961235 Iustin Pop
  @param node: the node to check
433 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is offline
434 a5961235 Iustin Pop

435 a5961235 Iustin Pop
  """
436 a5961235 Iustin Pop
  if lu.cfg.GetNodeInfo(node).offline:
437 a5961235 Iustin Pop
    raise errors.OpPrereqError("Can't use offline node %s" % node)
438 a5961235 Iustin Pop
439 a5961235 Iustin Pop
440 733a2b6a Iustin Pop
def _CheckNodeNotDrained(lu, node):
441 733a2b6a Iustin Pop
  """Ensure that a given node is not drained.
442 733a2b6a Iustin Pop

443 733a2b6a Iustin Pop
  @param lu: the LU on behalf of which we make the check
444 733a2b6a Iustin Pop
  @param node: the node to check
445 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is drained
446 733a2b6a Iustin Pop

447 733a2b6a Iustin Pop
  """
448 733a2b6a Iustin Pop
  if lu.cfg.GetNodeInfo(node).drained:
449 733a2b6a Iustin Pop
    raise errors.OpPrereqError("Can't use drained node %s" % node)
450 733a2b6a Iustin Pop
451 733a2b6a Iustin Pop
452 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
453 67fc3042 Iustin Pop
                          memory, vcpus, nics, disk_template, disks,
454 7c4d6c7b Michael Hanselmann
                          bep, hvp, hypervisor_name):
455 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
456 e4376078 Iustin Pop

457 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
458 e4376078 Iustin Pop

459 e4376078 Iustin Pop
  @type name: string
460 e4376078 Iustin Pop
  @param name: the name of the instance
461 e4376078 Iustin Pop
  @type primary_node: string
462 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
463 e4376078 Iustin Pop
  @type secondary_nodes: list
464 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
465 e4376078 Iustin Pop
  @type os_type: string
466 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
467 0d68c45d Iustin Pop
  @type status: boolean
468 0d68c45d Iustin Pop
  @param status: the should_run status of the instance
469 e4376078 Iustin Pop
  @type memory: string
470 e4376078 Iustin Pop
  @param memory: the memory size of the instance
471 e4376078 Iustin Pop
  @type vcpus: string
472 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
473 e4376078 Iustin Pop
  @type nics: list
474 e4376078 Iustin Pop
  @param nics: list of tuples (ip, bridge, mac) representing
475 e4376078 Iustin Pop
      the NICs the instance  has
476 2c2690c9 Iustin Pop
  @type disk_template: string
477 5bbd3f7f Michael Hanselmann
  @param disk_template: the disk template of the instance
478 2c2690c9 Iustin Pop
  @type disks: list
479 2c2690c9 Iustin Pop
  @param disks: the list of (size, mode) pairs
480 67fc3042 Iustin Pop
  @type bep: dict
481 67fc3042 Iustin Pop
  @param bep: the backend parameters for the instance
482 67fc3042 Iustin Pop
  @type hvp: dict
483 67fc3042 Iustin Pop
  @param hvp: the hypervisor parameters for the instance
484 7c4d6c7b Michael Hanselmann
  @type hypervisor_name: string
485 7c4d6c7b Michael Hanselmann
  @param hypervisor_name: the hypervisor for the instance
486 e4376078 Iustin Pop
  @rtype: dict
487 e4376078 Iustin Pop
  @return: the hook environment for this instance
488 ecb215b5 Michael Hanselmann

489 396e1b78 Michael Hanselmann
  """
490 0d68c45d Iustin Pop
  if status:
491 0d68c45d Iustin Pop
    str_status = "up"
492 0d68c45d Iustin Pop
  else:
493 0d68c45d Iustin Pop
    str_status = "down"
494 396e1b78 Michael Hanselmann
  env = {
495 0e137c28 Iustin Pop
    "OP_TARGET": name,
496 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
497 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
498 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
499 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
500 0d68c45d Iustin Pop
    "INSTANCE_STATUS": str_status,
501 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
502 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
503 2c2690c9 Iustin Pop
    "INSTANCE_DISK_TEMPLATE": disk_template,
504 7c4d6c7b Michael Hanselmann
    "INSTANCE_HYPERVISOR": hypervisor_name,
505 396e1b78 Michael Hanselmann
  }
506 396e1b78 Michael Hanselmann
507 396e1b78 Michael Hanselmann
  if nics:
508 396e1b78 Michael Hanselmann
    nic_count = len(nics)
509 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
510 396e1b78 Michael Hanselmann
      if ip is None:
511 396e1b78 Michael Hanselmann
        ip = ""
512 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
513 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
514 2c2690c9 Iustin Pop
      env["INSTANCE_NIC%d_MAC" % idx] = mac
515 396e1b78 Michael Hanselmann
  else:
516 396e1b78 Michael Hanselmann
    nic_count = 0
517 396e1b78 Michael Hanselmann
518 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
519 396e1b78 Michael Hanselmann
520 2c2690c9 Iustin Pop
  if disks:
521 2c2690c9 Iustin Pop
    disk_count = len(disks)
522 2c2690c9 Iustin Pop
    for idx, (size, mode) in enumerate(disks):
523 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_SIZE" % idx] = size
524 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_MODE" % idx] = mode
525 2c2690c9 Iustin Pop
  else:
526 2c2690c9 Iustin Pop
    disk_count = 0
527 2c2690c9 Iustin Pop
528 2c2690c9 Iustin Pop
  env["INSTANCE_DISK_COUNT"] = disk_count
529 2c2690c9 Iustin Pop
530 67fc3042 Iustin Pop
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
531 67fc3042 Iustin Pop
    for key, value in source.items():
532 67fc3042 Iustin Pop
      env["INSTANCE_%s_%s" % (kind, key)] = value
533 67fc3042 Iustin Pop
534 396e1b78 Michael Hanselmann
  return env
535 396e1b78 Michael Hanselmann
536 396e1b78 Michael Hanselmann
537 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Build the hook environment dictionary from an instance object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster_info = lu.cfg.GetClusterInfo()
  # fill the backend and hypervisor parameters with cluster defaults
  be_full = cluster_info.FillBE(instance)
  hv_full = cluster_info.FillHV(instance)
  hook_args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': be_full[constants.BE_MEMORY],
    'vcpus': be_full[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': be_full,
    'hvp': hv_full,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    hook_args.update(override)
  return _BuildInstanceHookEnv(**hook_args)
573 396e1b78 Michael Hanselmann
574 396e1b78 Michael Hanselmann
575 ec0292f1 Iustin Pop
def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  Lets the configuration promote nodes to master candidates as needed,
  re-adds the promoted nodes to the LU's context, and warns when more
  candidates exist than the configured maximum.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute

  """
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    # pass the format arguments lazily, consistent with the LogInfo
    # call above (the original eagerly %-formatted the message here)
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)",
               mc_now, mc_max)
589 ec0292f1 Iustin Pop
590 ec0292f1 Iustin Pop
591 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  Queries the instance's primary node over RPC and raises an
  L{errors.OpPrereqError} if any required bridge is missing there.

  """
  # collect the bridges of all the instance's NICs
  bridges = []
  for nic in instance.nics:
    bridges.append(nic.bridge)
  target_node = instance.primary_node
  result = lu.rpc.call_bridges_exist(target_node, bridges)
  result.Raise()
  if not result.data:
    raise errors.OpPrereqError("One or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (bridges, target_node))
603 bf6929a2 Alexander Schreiber
604 bf6929a2 Alexander Schreiber
605 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master_name = self.cfg.GetMasterNode()

    remaining_nodes = self.cfg.GetNodeList()
    # the only node allowed to remain is the master itself
    if not (len(remaining_nodes) == 1 and remaining_nodes[0] == master_name):
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(remaining_nodes) - 1))
    remaining_instances = self.cfg.GetInstanceList()
    if remaining_instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(remaining_instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master_name = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master_name, False)
    result.Raise()
    if not result.data:
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    # keep backup copies of the ssh keys we are leaving behind
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)
    return master_name
643 a8083063 Iustin Pop
644 a8083063 Iustin Pop
645 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  # the only required opcode parameter: the list of checks to skip
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    # verification needs to see every node and instance, but only in
    # shared mode so it does not block other operations
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # value 1 marks every locking level as shared
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
660 a8083063 Iustin Pop
661 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map, vg_name):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
    @rtype: boolean
    @return: True if at least one error was found, False otherwise

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    # the version reply must be a (protocol, release) pair
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    # protocol version mismatch aborts all further checks for this node
    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version mismatch is only a warning, not an error
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G; skipped entirely when the
    # cluster has no volume group configured (vg_name is None)
    if vg_name is not None:
      vglist = node_result.get(constants.NV_VGLIST, None)
      if not vglist:
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                        (node,))
        bad = True
      else:
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
          bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        # files listed in master_files should only live on master candidates
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates (and the file is outdated)" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      # a non-empty NV_NODELIST maps failing peer names to error messages
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    # checks tcp connectivity from this node to its peers
    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    # per-hypervisor verification results (None means verify passed)
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    if vg_name is not None:
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
      if not isinstance(used_minors, (tuple, list)):
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
                    str(used_minors))
      else:
        # cross-check the configured minors against the in-use ones
        for minor, (iname, must_exist) in drbd_map.items():
          if minor not in used_minors and must_exist:
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
                        " not active" % (minor, iname))
            bad = True
        for minor in used_minors:
          if minor not in drbd_map:
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
                        minor)
            bad = True

    return bad
808 a8083063 Iustin Pop
809 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify a single instance.

    Checks that the instance's logical volumes are present on the
    expected nodes and that the instance runs only on its primary node.

    """
    found_errors = False

    primary = instanceconfig.primary_node

    expected_vols = {}
    instanceconfig.MapLVsByNode(expected_vols)

    for node, volumes in expected_vols.items():
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      present = node_vol_is.get(node, {})
      for volume in volumes:
        if volume not in present:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          found_errors = True

    if instanceconfig.admin_up:
      # an instance marked as running must be found on its primary node,
      # unless that node is offline
      running_on_primary = (primary in node_instance and
                            instance in node_instance[primary])
      if not running_on_primary and primary not in n_offline:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, primary))
        found_errors = True

    # the instance must not appear on any node other than its primary
    for node in node_instance:
      if node != primary and instance in node_instance[node]:
        feedback_fn("  - ERROR: instance %s should not run on node %s" %
                        (instance, node))
        found_errors = True

    return found_errors
850 a8083063 Iustin Pop
851 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    found_orphans = False

    for node, volumes in node_vol_is.items():
      # volumes the configuration expects on this node
      expected = node_vol_should.get(node, {})
      for volume in volumes:
        if volume not in expected:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          found_orphans = True
    return found_orphans
867 a8083063 Iustin Pop
868 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    found_orphans = False
    known_instances = frozenset(instancelist)
    for node, running in node_instance.items():
      for iname in running:
        if iname not in known_instances:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (iname, node))
          found_orphans = True
    return found_orphans
882 a8083063 Iustin Pop
883 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    found_errors = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary
      # has enough memory to host all instances it is supposed to, should
      # a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as
      # up ones, considering that even if they're down someone might want
      # to start them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for iname in instances:
          be_params = self.cfg.GetClusterInfo().FillBE(instance_cfg[iname])
          # only auto-balanced instances count towards N+1 requirements
          if be_params[constants.BE_AUTO_BALANCE]:
            needed_mem += be_params[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          found_errors = True
    return found_errors
912 2b3b6ddd Guido Trotter
913 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    # every requested skip must be one of the known optional checks
    if not self.skip_set.issubset(constants.VERIFY_OPTIONAL_CHECKS):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
923 a8083063 Iustin Pop
924 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just ran in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    run_nodes = self.cfg.GetNodeList()
    env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
      }
    # export each node's tags under its own variable
    for node_obj in self.cfg.GetAllNodesInfo().values():
      env["NODE_TAGS_%s" % node_obj.name] = " ".join(node_obj.GetTags())

    return env, [], run_nodes
939 d8fff41c Guido Trotter
940 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
941 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
942 a8083063 Iustin Pop

943 a8083063 Iustin Pop
    """
944 a8083063 Iustin Pop
    bad = False
945 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
946 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
947 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
948 a8083063 Iustin Pop
949 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
950 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
951 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
952 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
953 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
954 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
955 6d2e83d5 Iustin Pop
                        for iname in instancelist)
956 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
957 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
958 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
959 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
960 a8083063 Iustin Pop
    node_volume = {}
961 a8083063 Iustin Pop
    node_instance = {}
962 9c9c7d30 Guido Trotter
    node_info = {}
963 26b6af5e Guido Trotter
    instance_cfg = {}
964 a8083063 Iustin Pop
965 a8083063 Iustin Pop
    # FIXME: verify OS list
966 a8083063 Iustin Pop
    # do local checksums
967 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
968 112f18a5 Iustin Pop
969 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
970 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
971 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
972 112f18a5 Iustin Pop
    file_names.extend(master_files)
973 112f18a5 Iustin Pop
974 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
975 a8083063 Iustin Pop
976 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
977 a8083063 Iustin Pop
    node_verify_param = {
978 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
979 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
980 82e37788 Iustin Pop
                              if not node.offline],
981 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
982 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
983 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
984 82e37788 Iustin Pop
                                 if not node.offline],
985 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
986 25361b9a Iustin Pop
      constants.NV_VERSION: None,
987 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
988 a8083063 Iustin Pop
      }
989 cc9e1230 Guido Trotter
    if vg_name is not None:
990 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
991 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
992 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
993 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
994 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
995 a8083063 Iustin Pop
996 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
997 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
998 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
999 6d2e83d5 Iustin Pop
1000 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1001 112f18a5 Iustin Pop
      node = node_i.name
1002 25361b9a Iustin Pop
      nresult = all_nvinfo[node].data
1003 25361b9a Iustin Pop
1004 0a66c968 Iustin Pop
      if node_i.offline:
1005 0a66c968 Iustin Pop
        feedback_fn("* Skipping offline node %s" % (node,))
1006 0a66c968 Iustin Pop
        n_offline.append(node)
1007 0a66c968 Iustin Pop
        continue
1008 0a66c968 Iustin Pop
1009 112f18a5 Iustin Pop
      if node == master_node:
1010 25361b9a Iustin Pop
        ntype = "master"
1011 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1012 25361b9a Iustin Pop
        ntype = "master candidate"
1013 22f0f71d Iustin Pop
      elif node_i.drained:
1014 22f0f71d Iustin Pop
        ntype = "drained"
1015 22f0f71d Iustin Pop
        n_drained.append(node)
1016 112f18a5 Iustin Pop
      else:
1017 25361b9a Iustin Pop
        ntype = "regular"
1018 112f18a5 Iustin Pop
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1019 25361b9a Iustin Pop
1020 25361b9a Iustin Pop
      if all_nvinfo[node].failed or not isinstance(nresult, dict):
1021 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
1022 25361b9a Iustin Pop
        bad = True
1023 25361b9a Iustin Pop
        continue
1024 25361b9a Iustin Pop
1025 6d2e83d5 Iustin Pop
      node_drbd = {}
1026 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1027 c614e5fb Iustin Pop
        if instance not in instanceinfo:
1028 c614e5fb Iustin Pop
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
1029 c614e5fb Iustin Pop
                      instance)
1030 c614e5fb Iustin Pop
          # ghost instance should not be running, but otherwise we
1031 c614e5fb Iustin Pop
          # don't give double warnings (both ghost instance and
1032 c614e5fb Iustin Pop
          # unallocated minor in use)
1033 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1034 c614e5fb Iustin Pop
        else:
1035 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1036 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
1037 112f18a5 Iustin Pop
      result = self._VerifyNode(node_i, file_names, local_checksums,
1038 6d2e83d5 Iustin Pop
                                nresult, feedback_fn, master_files,
1039 cc9e1230 Guido Trotter
                                node_drbd, vg_name)
1040 a8083063 Iustin Pop
      bad = bad or result
1041 a8083063 Iustin Pop
1042 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1043 cc9e1230 Guido Trotter
      if vg_name is None:
1044 cc9e1230 Guido Trotter
        node_volume[node] = {}
1045 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1046 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
1047 26f15862 Iustin Pop
                    (node, utils.SafeEncode(lvdata)))
1048 b63ed789 Iustin Pop
        bad = True
1049 b63ed789 Iustin Pop
        node_volume[node] = {}
1050 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1051 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1052 a8083063 Iustin Pop
        bad = True
1053 a8083063 Iustin Pop
        continue
1054 b63ed789 Iustin Pop
      else:
1055 25361b9a Iustin Pop
        node_volume[node] = lvdata
1056 a8083063 Iustin Pop
1057 a8083063 Iustin Pop
      # node_instance
1058 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1059 25361b9a Iustin Pop
      if not isinstance(idata, list):
1060 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1061 25361b9a Iustin Pop
                    (node,))
1062 a8083063 Iustin Pop
        bad = True
1063 a8083063 Iustin Pop
        continue
1064 a8083063 Iustin Pop
1065 25361b9a Iustin Pop
      node_instance[node] = idata
1066 a8083063 Iustin Pop
1067 9c9c7d30 Guido Trotter
      # node_info
1068 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1069 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
1070 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1071 9c9c7d30 Guido Trotter
        bad = True
1072 9c9c7d30 Guido Trotter
        continue
1073 9c9c7d30 Guido Trotter
1074 9c9c7d30 Guido Trotter
      try:
1075 9c9c7d30 Guido Trotter
        node_info[node] = {
1076 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1077 93e4c50b Guido Trotter
          "pinst": [],
1078 93e4c50b Guido Trotter
          "sinst": [],
1079 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1080 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1081 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1082 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1083 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1084 36e7da50 Guido Trotter
          # secondary.
1085 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1086 9c9c7d30 Guido Trotter
        }
1087 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1088 cc9e1230 Guido Trotter
        if vg_name is not None:
1089 9a198532 Iustin Pop
          if (constants.NV_VGLIST not in nresult or
1090 9a198532 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST]):
1091 9a198532 Iustin Pop
            feedback_fn("  - ERROR: node %s didn't return data for the"
1092 9a198532 Iustin Pop
                        " volume group '%s' - it is either missing or broken" %
1093 9a198532 Iustin Pop
                        (node, vg_name))
1094 9a198532 Iustin Pop
            bad = True
1095 9a198532 Iustin Pop
            continue
1096 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1097 9a198532 Iustin Pop
      except (ValueError, KeyError):
1098 9a198532 Iustin Pop
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
1099 9a198532 Iustin Pop
                    " from node %s" % (node,))
1100 9c9c7d30 Guido Trotter
        bad = True
1101 9c9c7d30 Guido Trotter
        continue
1102 9c9c7d30 Guido Trotter
1103 a8083063 Iustin Pop
    node_vol_should = {}
1104 a8083063 Iustin Pop
1105 a8083063 Iustin Pop
    for instance in instancelist:
1106 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1107 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1108 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1109 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1110 c5705f58 Guido Trotter
      bad = bad or result
1111 832261fd Iustin Pop
      inst_nodes_offline = []
1112 a8083063 Iustin Pop
1113 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1114 a8083063 Iustin Pop
1115 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1116 26b6af5e Guido Trotter
1117 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1118 93e4c50b Guido Trotter
      if pnode in node_info:
1119 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1120 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1121 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1122 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1123 93e4c50b Guido Trotter
        bad = True
1124 93e4c50b Guido Trotter
1125 832261fd Iustin Pop
      if pnode in n_offline:
1126 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1127 832261fd Iustin Pop
1128 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1129 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1130 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1131 93e4c50b Guido Trotter
      # supported either.
1132 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1133 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1134 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1135 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1136 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1137 93e4c50b Guido Trotter
                    % instance)
1138 93e4c50b Guido Trotter
1139 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1140 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1141 3924700f Iustin Pop
1142 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1143 93e4c50b Guido Trotter
        if snode in node_info:
1144 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1145 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1146 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1147 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1148 0a66c968 Iustin Pop
        elif snode not in n_offline:
1149 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1150 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1151 832261fd Iustin Pop
          bad = True
1152 832261fd Iustin Pop
        if snode in n_offline:
1153 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1154 832261fd Iustin Pop
1155 832261fd Iustin Pop
      if inst_nodes_offline:
1156 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1157 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1158 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1159 832261fd Iustin Pop
        bad = True
1160 93e4c50b Guido Trotter
1161 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1162 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1163 a8083063 Iustin Pop
                                       feedback_fn)
1164 a8083063 Iustin Pop
    bad = bad or result
1165 a8083063 Iustin Pop
1166 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1167 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1168 a8083063 Iustin Pop
                                         feedback_fn)
1169 a8083063 Iustin Pop
    bad = bad or result
1170 a8083063 Iustin Pop
1171 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1172 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1173 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1174 e54c4c5e Guido Trotter
      bad = bad or result
1175 2b3b6ddd Guido Trotter
1176 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1177 2b3b6ddd Guido Trotter
    if i_non_redundant:
1178 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1179 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1180 2b3b6ddd Guido Trotter
1181 3924700f Iustin Pop
    if i_non_a_balanced:
1182 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1183 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1184 3924700f Iustin Pop
1185 0a66c968 Iustin Pop
    if n_offline:
1186 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1187 0a66c968 Iustin Pop
1188 22f0f71d Iustin Pop
    if n_drained:
1189 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1190 22f0f71d Iustin Pop
1191 34290825 Michael Hanselmann
    return not bad
1192 a8083063 Iustin Pop
1193 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method looks at the post-phase hook results, reports any failed
    hook scripts to the user (with their output re-indented), and adjusts
    the LU result code accordingly.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # Only POST phase hooks are really run, so only their results are
    # interesting; any other phase falls through (returning None, like
    # the historical flow did for non-POST phases)
    if phase != constants.HOOKS_PHASE_POST:
      return None

    # used to shift the hooks' output to the proper indentation level
    reindent_re = re.compile('^', re.M)
    feedback_fn("* Hooks Results")
    if not hooks_results:
      feedback_fn("  - ERROR: general communication failure")
      return 1

    for node, nres in hooks_results.items():
      # the node header is printed lazily, only once a failing hook is
      # actually found on that node
      header_pending = True
      if nres.failed or nres.data is False or not isinstance(nres.data, list):
        if nres.offline:
          # offline nodes neither warn nor fail the result
          continue
        feedback_fn("    Communication failure in hooks execution")
        lu_result = 1
        continue
      for script, hkr, output in nres.data:
        if hkr != constants.HKR_FAIL:
          continue
        if header_pending:
          feedback_fn("  Node %s:" % node)
          header_pending = False
        feedback_fn("    ERROR: Script %s failed, output:" % script)
        feedback_fn("%s" % reindent_re.sub('      ', output))
        lu_result = 1

    return lu_result
1242 a8083063 Iustin Pop
1243 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # we need all nodes and all instances, but every lock is acquired in
    # shared mode (share_locks is set to 1 for all levels)
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @return: a 4-tuple (res_nodes, res_nlvm, res_instances, res_missing):
        nodes with unusable volume data, per-node LVM error strings,
        instance names having at least one non-online LV, and a map of
        instance name to list of missing (node, volume) pairs

    """
    # note: the tuple elements are aliased, so they can be filled in
    # incrementally below while "result" stays the returned value
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # nv_dict maps (node, volume) to the owning instance, but only for
    # running instances using a net-mirrored disk template
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    # nothing to check, so skip the (expensive) RPC entirely
    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]
      if lvs.failed:
        # offline nodes are expected to fail, so only warn otherwise
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        # a string payload carries an LVM error message from the node
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        # anything else means the node returned garbage
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      # pop every volume we could account for; whatever remains in
      # nv_dict afterwards is a missing LV
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1328 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  _OP_REQP = ["instances"]
  REQ_BGL = False

  def ExpandNames(self):
    # expand the (optional) instance list into locks; an empty list
    # means "all instances"

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      # node locks are computed later from the instance locks
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      # no names given: lock everything, resolve names in CheckPrereq
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def DeclareLocks(self, level):
    # only the primary nodes are queried below, so only lock those
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object
    @return: True if a child size was changed, False otherwise

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    @return: list of (instance name, disk index, new size) tuples for
        every disk whose recorded size was corrected

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      # work on copies so SetDiskID does not alter the config objects
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsizes(node, newl)
      if result.failed:
        self.LogWarning("Failure in blockdev_getsizes call to node"
                        " %s, ignoring", node)
        continue
      if len(result.data) != len(dskl):
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.data):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
        # scale by 2**20 before comparing with the recorded size
        # (presumably the rpc reports bytes and disk.size is MiB —
        # the shift amount is the only evidence here)
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance)
          changed.append((instance.name, idx, disk.size))
    return changed
1449 60975797 Iustin Pop
1450 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run only on the master node, with the old and new names
    in the environment.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves the new name, rejects a no-op rename and, for a changed IP,
    makes sure the new address is not already live on the network.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # a reachable address means something else already uses it
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    # store the resolved (canonical) name for Exec
    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    Stops the master role, updates the cluster name/IP in the
    configuration, redistributes the known-hosts file, and restarts the
    master role even if the update fails.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        # the master might not be in the node list; it gets the file
        # locally via WriteKnownHostsFile above
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)

    finally:
      # always try to restore the master role, whatever happened above
      result = self.rpc.call_node_start_master(master, False, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")
1529 07bd8a51 Iustin Pop
1530 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  # a device is lvm-based if it is an LV itself, or if any device in
  # its children subtree is
  children = disk.children or []
  return (disk.dev_type == constants.LD_LV or
          any(_RecursiveCheckIfLVMBased(chdisk) for chdisk in children))
1545 8084f9f6 Manuel Franceschini
1546 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  Handles the volume group name, hypervisor list and parameters,
  backend parameter defaults and the master candidate pool size.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    Normalizes a missing candidate_pool_size to None and, when given,
    converts it to a positive integer.

    @raise errors.OpPrereqError: if candidate_pool_size is not an
        integer or is smaller than one

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except (ValueError, TypeError), err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    # shared locks are enough: we only read node data during checks
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    Hooks run only on the master node.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    @raise errors.OpPrereqError: on any invalid or conflicting
        parameter combination

    """
    # an explicitly empty vg_name means "disable LVM storage", which is
    # only allowed while no lvm-based instance disks exist
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      # merge the per-hypervisor dicts into the current settings
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      if not self.hv_list:
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                                   " least one member")
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
      if invalid_hvs:
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                                   " entries: %s" % invalid_hvs)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    Applies the values computed/validated in CheckPrereq and saves the
    updated cluster object.

    """
    if self.op.vg_name is not None:
      # an empty string is normalized to None (LVM disabled)
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self)

    self.cfg.Update(self.cluster)
1688 8084f9f6 Manuel Franceschini
1689 8084f9f6 Manuel Franceschini
1690 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU: it only rewrites the (unchanged) cluster
  configuration, which is enough to push it out again.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # all nodes are involved, but shared locks suffice
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """No prerequisites to check.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    Saving the current cluster object back to the configuration is all
    that is needed to trigger the redistribution.

    """
    self.cfg.Update(self.cfg.GetClusterInfo())
1716 afee0879 Iustin Pop
1717 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Queries the mirror status of all the instance's disks on its primary
  node and loops (sleeping between passes) until they are fully synced,
  or reports the status once if oneshot is set.

  @type lu: L{LogicalUnit}
  @param lu: the calling logical unit; used for rpc, config access and
      user-level logging
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks are polled
  @type oneshot: boolean
  @param oneshot: if True, do a single reporting pass instead of
      waiting for full sync (degraded disks still cause short retries)
  @type unlock: boolean
  @param unlock: unused in this function -- presumably historical;
      confirm with callers before removing
  @rtype: boolean
  @return: True if no disk is left degraded, False otherwise
  @raise errors.RemoteError: if the primary node cannot be contacted
      for ten consecutive polls

  """
  if not instance.disks:
    # nothing to wait for
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  # consecutive failed-RPC counter; ten failures abort the wait
  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    # a successful call resets the failure counter
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # degraded with no progress percentage counts as "still degraded"
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a percentage means the resync is still running
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    # sleep proportionally to the longest estimate, capped at a minute
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1784 a8083063 Iustin Pop
1785 a8083063 Iustin Pop
1786 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1787 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1788 a8083063 Iustin Pop

1789 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1790 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1791 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1792 0834c866 Iustin Pop

1793 a8083063 Iustin Pop
  """
1794 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1795 0834c866 Iustin Pop
  if ldisk:
1796 0834c866 Iustin Pop
    idx = 6
1797 0834c866 Iustin Pop
  else:
1798 0834c866 Iustin Pop
    idx = 5
1799 a8083063 Iustin Pop
1800 a8083063 Iustin Pop
  result = True
1801 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1802 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1803 23829f6f Iustin Pop
    msg = rstats.RemoteFailMsg()
1804 23829f6f Iustin Pop
    if msg:
1805 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
1806 23829f6f Iustin Pop
      result = False
1807 23829f6f Iustin Pop
    elif not rstats.payload:
1808 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
1809 a8083063 Iustin Pop
      result = False
1810 a8083063 Iustin Pop
    else:
1811 23829f6f Iustin Pop
      result = result and (not rstats.payload[idx])
1812 a8083063 Iustin Pop
  if dev.children:
1813 a8083063 Iustin Pop
    for child in dev.children:
1814 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1815 a8083063 Iustin Pop
1816 a8083063 Iustin Pop
  return result
1817 a8083063 Iustin Pop
1818 a8083063 Iustin Pop
1819 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  # no static fields: everything is computed from the per-node OS scan
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    Nothing to check: field validation happened in ExpandNames.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into an a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].failed]
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in good_nodes:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    # only online nodes are scanned for OS definitions
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    # NOTE(review): node_data is expected to be a dict keyed by node
    # name, so this equality check with False looks like it can never
    # trigger -- confirm what call_os_diagnose returns on total failure
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # an OS is valid only if every node reported at least one
          # (valid) definition for it
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1910 a8083063 Iustin Pop
1911 a8083063 Iustin Pop
1912 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # run the hooks everywhere except on the node being removed
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # fixed: use the standard exception call syntax instead of the
      # deprecated "raise Class, value" statement form used before,
      # for consistency with the rest of this module
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    # store the canonical (expanded) name for Exec
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    # drop the node from the cluster context/configuration first...
    self.context.RemoveNode(node.name)

    # ...then tell the node itself to leave the cluster
    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)
1980 a8083063 Iustin Pop
1981 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields that require a live query on the nodes
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  # fields answerable from the configuration alone
  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    "drained",
    "role",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed when live (dynamic) data was requested and
    # the caller asked for it
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      # without locking, a requested node may have disappeared meanwhile
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      # gather live data via rpc; unreachable nodes get an empty dict
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    # per-node sets of primary/secondary instance names
    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      # only walk the instance list when instance fields were requested
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif field == "drained":
          val = node.drained
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        elif field == "role":
          # one-letter role: Master, Candidate, Drained, Offline, Regular
          if node.name == master_node:
            val = "M"
          elif node.master_candidate:
            val = "C"
          elif node.drained:
            val = "D"
          elif node.offline:
            val = "O"
          else:
            val = "R"
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
2149 a8083063 Iustin Pop
2150 a8083063 Iustin Pop
2151 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # volumes are only read, so a shared node lock is sufficient
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      # no explicit node list: query every node in the cluster
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    The final node list is taken from the locks we acquired in
    ExpandNames (field validation already happened there).

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    @return: a list of rows, one per volume, each row holding the
        stringified values of the requested output fields

    """
    node_names = self.nodes
    volumes = self.rpc.call_node_volumes(node_names)

    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]
    # map each instance to its per-node LV lists, for the "instance" field
    lv_by_node = dict((inst, inst.MapLVsByNode()) for inst in instances)

    output = []
    for node_name in node_names:
      # skip nodes with no answer, failed RPC or empty volume data
      if node_name not in volumes:
        continue
      nresult = volumes[node_name]
      if nresult.failed or not nresult.data:
        continue

      node_vols = sorted(nresult.data, key=lambda vol: vol['dev'])

      for vol in node_vols:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node_name
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this LV on this node, '-' if none
            val = '-'
            for inst in instances:
              node_lvs = lv_by_node[inst].get(node_name, [])
              if vol['name'] in node_lvs:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
2230 dcb93971 Michael Hanselmann
2231 dcb93971 Michael Hanselmann
2232 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  Also used (via the "readd" opcode flag) to re-add a node that is
  already in the configuration but needs its keys/flags refreshed.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current node list, post-hooks also on the new node
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the node name; presumably HostInfo raises on unresolvable
    # names -- TODO confirm against utils.HostInfo
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    # note: primary_ip is also stored back on the opcode for the hooks env
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # single-homed: secondary defaults to the primary address
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    # check for IP conflicts with every existing node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # a re-added node must keep its previous addresses
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # decide whether the new node can be a master candidate: only if the
    # current candidate count is below the (possibly grown) pool maximum
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    if self.op.readd:
      # don't count the node itself when re-adding it
      exceptions = [node]
    else:
      exceptions = []
    mc_now, mc_max = self.cfg.GetMasterCandidateStats(exceptions)
    # the new node will increase mc_max with one, so:
    mc_max = min(mc_max + 1, cp_size)
    self.master_candidate = mc_now < mc_max

    if self.op.readd:
      self.new_node = self.cfg.GetNodeInfo(node)
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
    else:
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    Sequence: reset flags (readd), version handshake, ssh key transfer,
    secondary-IP/ssh verification, then config distribution and context
    registration.

    """
    new_node = self.new_node
    node = new_node.name

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host DSA/RSA key pairs plus the cluster user's key pair
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot transfer ssh keys to the"
                               " new node: %s" % msg)

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      # dual-homed: the node itself must confirm it owns the secondary IP
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # run a verification pass from the master against the new node
    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        # a non-empty 'nodelist' maps failed names to error messages
        for failed in result[verifier].data['nodelist']:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      # the master already has the up-to-date files
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        # distribution failures are logged but not fatal
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    if constants.HTS_COPY_VNC_PASSWORD.intersection(enabled_hypervisors):
      # some hypervisors need the VNC password file on every node
      to_copy.append(constants.VNC_PASSWORD_FILE)

    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if result[node].failed or not result[node]:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      self.context.AddNode(new_node)
2478 a8083063 Iustin Pop
2479 a8083063 Iustin Pop
2480 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  Handles the offline, drained and master_candidate flags, including
  the automatic demotions/clearings their interactions require.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    # canonicalize the node name (fails with None on unknown names)
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      # nothing requested at all
      raise errors.OpPrereqError("Please pass at least one modification")
    if all_mods.count(True) > 1:
      # the three states are mutually exclusive
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the requested flag changes are valid: the master
    node's flags cannot be touched, demoting a candidate must not shrink
    the candidate pool below its configured size (unless forced), and an
    offline/drained node cannot be promoted to master candidate.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via masterfailover")

    # any of these changes would (directly or indirectly) demote a
    # current master candidate
    if ((self.op.master_candidate == False or self.op.offline == True or
         self.op.drained == True) and node.master_candidate):
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    # promotion requires the node to not (remain) offline/drained; an
    # explicit "False" for the conflicting flag in the same opcode is ok
    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name)

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    @return: list of (flag-name, change-description) pairs describing
        every change applied, including automatic ones

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        # going offline implies losing candidate and drained status
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        # ask the node to drop candidate-only files; failure is non-fatal
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        # draining also demotes from candidate and clears offline
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
          rrc = self.rpc.call_node_demote_from_mc(node.name)
          msg = rrc.RemoteFailMsg()
          if msg:
            self.LogWarning("Node failed to demote itself: %s" % msg)
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result
2613 b31c8676 Iustin Pop
2614 b31c8676 Iustin Pop
2615 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # read-only query, no locks needed
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    @return: dictionary with the static software versions plus the
        cluster-level configuration values

    """
    cluster = self.cfg.GetClusterInfo()

    # hypervisor parameters, restricted to the enabled hypervisors
    hv_params = {}
    for hv_name in cluster.enabled_hypervisors:
      hv_params[hv_name] = cluster.hvparams[hv_name]

    # software/protocol version information
    info = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      }
    # cluster-wide configuration
    info.update({
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": hv_params,
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "default_bridge": cluster.default_bridge,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "file_storage_dir": cluster.file_storage_dir,
      "tags": list(cluster.GetTags()),
      })

    return info
2659 a8083063 Iustin Pop
2660 a8083063 Iustin Pop
2661 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    # this LU only reads the configuration, so no locks are needed
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Compute the list of requested configuration values.

    """
    answer = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        value = self.cfg.GetClusterName()
      elif field == "master_node":
        value = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        # the queue drain flag is simply the presence of a marker file
        value = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      else:
        raise errors.ParameterError(field)
      answer.append(value)
    return answer
2699 a8083063 Iustin Pop
2700 a8083063 Iustin Pop
2701 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance
    _CheckNodeOnline(self, instance.primary_node)
    # older opcodes may lack the ignore_size attribute; default to False
    self.op.ignore_size = getattr(self.op, "ignore_size", False)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
      _AssembleInstanceDisks(self, self.instance,
                             ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
2741 a8083063 Iustin Pop
2742 a8083063 Iustin Pop
2743 e3443b36 Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        # work on a copy so the cached config object keeps its size
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      # is_primary=False: every node assembles in secondary mode first
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      # is_primary=True: promote the device on the primary node only
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
    # NOTE(review): 'result' here is the value from the last iteration of
    # the inner loop (the primary-node call); if that call failed,
    # result.payload may not hold a valid device path -- confirm callers
    # only use device_info when disks_ok is True
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        result.payload))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2821 a8083063 Iustin Pop
2822 a8083063 Iustin Pop
2823 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Assemble the disks of an instance, aborting on failure.

  """
  assembled, _ = _AssembleInstanceDisks(lu, instance,
                                        ignore_secondaries=force)
  if assembled:
    return
  # clean up whatever was partially activated before reporting the error
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.proc.LogWarning("", hint="If the message above refers to a"
                       " secondary node,"
                       " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2836 fe7b0351 Michael Hanselmann
2837 fe7b0351 Michael Hanselmann
2838 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    _SafeShutdownInstanceDisks(self, self.instance)
2872 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  pnode = instance.primary_node
  running = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  # a failed RPC or a malformed answer means we cannot trust the node
  if running.failed or not isinstance(running.data, list):
    raise errors.OpExecError("Can't contact node '%s'" % pnode)

  if instance.name in running.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2891 a8083063 Iustin Pop
2892 a8083063 Iustin Pop
2893 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, failures on the primary node only produce
  a warning; otherwise any failure (primary or secondary) makes the
  function return failure.

  """
  success = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      shutdown_result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      err = shutdown_result.RemoteFailMsg()
      if not err:
        continue
      lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                    disk.iv_name, node, err)
      # primary-node failures are only forgiven when ignore_primary is set
      if node != instance.primary_node or not ignore_primary:
        success = False
  return success
2914 a8083063 Iustin Pop
2915 a8083063 Iustin Pop
2916 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Verify that a node has at least C{requested} MiB of free memory.

  The free memory figure is obtained via an RPC query for the given
  hypervisor; if the node cannot report it, or reports less than the
  requested amount, an exception is raised.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_data = lu.rpc.call_node_info([node], lu.cfg.GetVGName(),
                                    hypervisor_name)[node]
  node_data.Raise()
  free_mem = node_data.data.get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                             " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                             " needed %s MiB, available %s MiB" %
                             (node, reason, requested, free_mem))
2948 d4f16fd9 Iustin Pop
2949 d4f16fd9 Iustin Pop
2950 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    Also validates and stores optional one-shot beparams/hvparams
    overrides on self.beparams/self.hvparams for use in Exec().

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra beparams
    # optional per-start backend parameter overrides (may be absent)
    self.beparams = getattr(self.op, "beparams", {})
    if self.beparams:
      if not isinstance(self.beparams, dict):
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
                                   " dict" % (type(self.beparams), ))
      # fill the beparams dict
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
      self.op.beparams = self.beparams

    # extra hvparams
    # optional per-start hypervisor parameter overrides (may be absent)
    self.hvparams = getattr(self.op, "hvparams", {})
    if self.hvparams:
      if not isinstance(self.hvparams, dict):
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
                                   " dict" % (type(self.hvparams), ))

      # check hypervisor parameter syntax (locally)
      # merge cluster defaults, instance values and the overrides, then
      # validate the resulting complete parameter set
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
      filled_hvp = cluster.FillDict(cluster.hvparams[instance.hypervisor],
                                    instance.hvparams)
      filled_hvp.update(self.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
      self.op.hvparams = self.hvparams

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    # only check free memory if the instance is not already running on
    # the primary node (empty/false data means not running)
    if not remote_info.data:
      _CheckNodeFreeMemory(self, instance.primary_node,
                           "starting instance %s" % instance.name,
                           bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    # mark the instance as up in the config before the actual start
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    # self.hvparams/self.beparams were validated in CheckPrereq
    result = self.rpc.call_instance_start(node_current, instance,
                                          self.hvparams, self.beparams)
    msg = result.RemoteFailMsg()
    if msg:
      # roll back the disk activation before failing
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)
3047 a8083063 Iustin Pop
3048 a8083063 Iustin Pop
3049 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    # validate the reboot type before taking any locks
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft and hard reboots are delegated to the node's hypervisor; a
    full reboot is implemented as shutdown + disk restart + start.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not reboot instance: %s" % msg)
    else:
      # full reboot: stop the instance, cycle its disks, start it again
      result = self.rpc.call_instance_shutdown(node_current, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance for"
                                 " full reboot: %s" % msg)
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        # roll back the disk activation before failing
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)
3132 bf6929a2 Alexander Schreiber
3133 bf6929a2 Alexander Schreiber
3134 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    node_list = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance
    _CheckNodeOnline(self, instance.primary_node)

  def Exec(self, feedback_fn):
    """Stop the instance and shut down its disks.

    """
    inst = self.instance
    pnode = inst.primary_node
    # record the new state in the config before talking to the node
    self.cfg.MarkInstanceDown(inst.name)
    shutdown_result = self.rpc.call_instance_shutdown(pnode, inst)
    err = shutdown_result.RemoteFailMsg()
    if err:
      # a failed shutdown is only a warning; we still try the disks
      self.proc.LogWarning("Could not shutdown instance: %s" % err)

    _ShutdownInstanceDisks(self, inst)
3180 a8083063 Iustin Pop
3181 a8083063 Iustin Pop
3182 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  Re-runs the OS create scripts for a stopped instance, optionally
  switching it to a different OS (``os_type``).

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running
    (neither marked up in the config nor actually alive on its primary
    node), and, if an OS change was requested, that the new OS is
    supported on the primary node.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check against the hypervisor: the config may say "down"
    # while the instance is in fact still running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: the reinstall opcode has no 'pnode' attribute, so the old
        # "self.op.pnode" reference here raised AttributeError instead of
        # the intended OpPrereqError; report the primary node instead
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    # disks must be activated for the OS create scripts to run; always
    # shut them down again afterwards, even on failure
    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      _ShutdownInstanceDisks(self, inst)
3268 fe7b0351 Michael Hanselmann
3269 fe7b0351 Michael Hanselmann
3270 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  Renames a stopped instance in the configuration, renames its file
  storage directory (for file-based instances) and runs the OS rename
  script on the primary node.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and is not already taken (neither as
    an instance name nor, unless ignore_ip is set, as a reachable IP).

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check against the hypervisor that the instance is really down
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    # FIX: docstring used to say "Reinstall the instance." (copy-paste
    # from LUReinstallInstance)
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    # the OS rename script needs the disks up; a failure here is only a
    # warning since the configuration rename has already happened
    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
3379 decd5f45 Iustin Pop
3380 decd5f45 Iustin Pop
3381 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the instance; node locks are computed later."""
    self._ExpandAndLockInstance()
    node_level = locking.LEVEL_NODE
    self.needed_locks[node_level] = []
    self.recalculate_locks[node_level] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the instance's node locks once the instance lock is held."""
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    The node list here is the master node only.

    """
    hook_env = _BuildInstanceHookEnvByObject(self, self.instance)
    node_list = [self.cfg.GetMasterNode()]
    return hook_env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    pnode = instance.primary_node
    logging.info("Shutting down instance %s on node %s",
                 instance.name, pnode)

    # shutdown failures are fatal unless ignore_failures was requested
    shutdown_result = self.rpc.call_instance_shutdown(pnode, instance)
    fail_msg = shutdown_result.RemoteFailMsg()
    if fail_msg:
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, pnode, fail_msg))
      feedback_fn("Warning: can't shutdown instance: %s" % fail_msg)

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
3449 a8083063 Iustin Pop
3450 a8083063 Iustin Pop
3451 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  Returns, for each selected instance, a row of values in the order of
  self.op.output_fields.  Static fields come from the configuration;
  dynamic fields (oper_state, oper_ram, status) require querying the
  nodes via RPC.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields answerable from the configuration alone; the regex entries
  # cover per-index disk/nic sub-fields such as "disk.size/0"
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  # fields that need live data from the nodes
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")


  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # queries only ever read, so shared locks suffice
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed when dynamic fields were requested AND the
    # caller asked for it
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    # bad_nodes: RPC failed; off_nodes: marked offline (may also be in
    # bad_nodes, see below)
    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
            # else no instance is alive
    else:
      # no live queries requested: pretend nothing is running
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        # st_match captures regex groups for "disk.*"/"nic.*" fields
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          # None means "unknown" (node unreachable)
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin + operational status string
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "vcpus":
          val = i_be[constants.BE_VCPUS]
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          if instance.nics:
            val = instance.nics[0].ip
          else:
            val = None
        elif field == "bridge":
          if instance.nics:
            val = instance.nics[0].bridge
          else:
            val = None
        elif field == "mac":
          if instance.nics:
            val = instance.nics[0].mac
          else:
            val = None
        elif field == "sda_size" or field == "sdb_size":
          # legacy names for the first/second disk's size
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, ("Declared but unhandled variable parameter '%s'" %
                           field)
        else:
          assert False, "Declared but unhandled parameter '%s'" % field
        iout.append(val)
      output.append(iout)

    return output
3699 a8083063 Iustin Pop
3700 a8083063 Iustin Pop
3701 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  The failover shuts the instance down on its primary node and starts
  it on its (network-mirrored) secondary node; it only works for
  instances whose disk template is in DTS_NET_MIRROR.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the locks we need.

    Node locks are left empty here and computed in DeclareLocks, once
    the instance lock is held.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the primary and secondary nodes of the instance.

    """
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    # hooks run on the master plus all secondary nodes
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a mirrored
    disk template, has an online, non-drained target node with enough
    free memory (when the instance is up) and the required bridges.

    """
    # the instance is already locked by _ExpandAndLockInstance, so a
    # None result here would be a programming error, not a user error
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    # failover always goes to the first secondary node
    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)

    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # degraded disks only block the failover for running instances,
        # and only when the user did not ask to ignore consistency
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      if self.op.ignore_consistency:
        # best-effort mode: warn and continue, the caller asserted the
        # source node is (or will be) down
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        # roll back disk activation before failing
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
class LUMigrateInstance(LogicalUnit):
3841 53c776b5 Iustin Pop
  """Migrate an instance.
3842 53c776b5 Iustin Pop

3843 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
3844 53c776b5 Iustin Pop
  which is done with shutdown.
3845 53c776b5 Iustin Pop

3846 53c776b5 Iustin Pop
  """
3847 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
3848 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3849 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
3850 53c776b5 Iustin Pop
3851 53c776b5 Iustin Pop
  REQ_BGL = False
3852 53c776b5 Iustin Pop
3853 53c776b5 Iustin Pop
  def ExpandNames(self):
3854 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
3855 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
3856 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3857 53c776b5 Iustin Pop
3858 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
3859 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
3860 53c776b5 Iustin Pop
      self._LockInstancesNodes()
3861 53c776b5 Iustin Pop
3862 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
3863 53c776b5 Iustin Pop
    """Build hooks env.
3864 53c776b5 Iustin Pop

3865 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3866 53c776b5 Iustin Pop

3867 53c776b5 Iustin Pop
    """
3868 53c776b5 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3869 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
3870 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
3871 53c776b5 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3872 53c776b5 Iustin Pop
    return env, nl, nl
3873 53c776b5 Iustin Pop
3874 53c776b5 Iustin Pop
  def CheckPrereq(self):
3875 53c776b5 Iustin Pop
    """Check prerequisites.
3876 53c776b5 Iustin Pop

3877 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
3878 53c776b5 Iustin Pop

3879 53c776b5 Iustin Pop
    """
3880 53c776b5 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3881 53c776b5 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3882 53c776b5 Iustin Pop
    if instance is None:
3883 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3884 53c776b5 Iustin Pop
                                 self.op.instance_name)
3885 53c776b5 Iustin Pop
3886 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
3887 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3888 53c776b5 Iustin Pop
                                 " drbd8, cannot migrate.")
3889 53c776b5 Iustin Pop
3890 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
3891 53c776b5 Iustin Pop
    if not secondary_nodes:
3892 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
3893 733a2b6a Iustin Pop
                                      " drbd8 disk template")
3894 53c776b5 Iustin Pop
3895 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
3896 53c776b5 Iustin Pop
3897 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
3898 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
3899 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
3900 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
3901 53c776b5 Iustin Pop
                         instance.hypervisor)
3902 53c776b5 Iustin Pop
3903 5bbd3f7f Michael Hanselmann
    # check bridge existence
3904 53c776b5 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
3905 53c776b5 Iustin Pop
    result = self.rpc.call_bridges_exist(target_node, brlist)
3906 53c776b5 Iustin Pop
    if result.failed or not result.data:
3907 53c776b5 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
3908 53c776b5 Iustin Pop
                                 " exist on destination node '%s'" %
3909 53c776b5 Iustin Pop
                                 (brlist, target_node))
3910 53c776b5 Iustin Pop
3911 53c776b5 Iustin Pop
    if not self.op.cleanup:
3912 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
3913 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
3914 53c776b5 Iustin Pop
                                                 instance)
3915 53c776b5 Iustin Pop
      msg = result.RemoteFailMsg()
3916 53c776b5 Iustin Pop
      if msg:
3917 53c776b5 Iustin Pop
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
3918 53c776b5 Iustin Pop
                                   msg)
3919 53c776b5 Iustin Pop
3920 53c776b5 Iustin Pop
    self.instance = instance
3921 53c776b5 Iustin Pop
3922 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
3923 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
3924 53c776b5 Iustin Pop

3925 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
3926 53c776b5 Iustin Pop

3927 53c776b5 Iustin Pop
    """
3928 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
3929 53c776b5 Iustin Pop
    all_done = False
3930 53c776b5 Iustin Pop
    while not all_done:
3931 53c776b5 Iustin Pop
      all_done = True
3932 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
3933 53c776b5 Iustin Pop
                                            self.nodes_ip,
3934 53c776b5 Iustin Pop
                                            self.instance.disks)
3935 53c776b5 Iustin Pop
      min_percent = 100
3936 53c776b5 Iustin Pop
      for node, nres in result.items():
3937 53c776b5 Iustin Pop
        msg = nres.RemoteFailMsg()
3938 53c776b5 Iustin Pop
        if msg:
3939 53c776b5 Iustin Pop
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
3940 53c776b5 Iustin Pop
                                   (node, msg))
3941 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
3942 53c776b5 Iustin Pop
        all_done = all_done and node_done
3943 53c776b5 Iustin Pop
        if node_percent is not None:
3944 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
3945 53c776b5 Iustin Pop
      if not all_done:
3946 53c776b5 Iustin Pop
        if min_percent < 100:
3947 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
3948 53c776b5 Iustin Pop
        time.sleep(2)
3949 53c776b5 Iustin Pop
3950 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
3951 53c776b5 Iustin Pop
    """Demote a node to secondary.
3952 53c776b5 Iustin Pop

3953 53c776b5 Iustin Pop
    """
3954 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
3955 53c776b5 Iustin Pop
3956 53c776b5 Iustin Pop
    for dev in self.instance.disks:
3957 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
3958 53c776b5 Iustin Pop
3959 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
3960 53c776b5 Iustin Pop
                                          self.instance.disks)
3961 53c776b5 Iustin Pop
    msg = result.RemoteFailMsg()
3962 53c776b5 Iustin Pop
    if msg:
3963 53c776b5 Iustin Pop
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
3964 53c776b5 Iustin Pop
                               " error %s" % (node, msg))
3965 53c776b5 Iustin Pop
3966 53c776b5 Iustin Pop
  def _GoStandalone(self):
3967 53c776b5 Iustin Pop
    """Disconnect from the network.
3968 53c776b5 Iustin Pop

3969 53c776b5 Iustin Pop
    """
3970 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
3971 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
3972 53c776b5 Iustin Pop
                                               self.instance.disks)
3973 53c776b5 Iustin Pop
    for node, nres in result.items():
3974 53c776b5 Iustin Pop
      msg = nres.RemoteFailMsg()
3975 53c776b5 Iustin Pop
      if msg:
3976 53c776b5 Iustin Pop
        raise errors.OpExecError("Cannot disconnect disks node %s,"
3977 53c776b5 Iustin Pop
                                 " error %s" % (node, msg))
3978 53c776b5 Iustin Pop
3979 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
3980 53c776b5 Iustin Pop
    """Reconnect to the network.
3981 53c776b5 Iustin Pop

3982 53c776b5 Iustin Pop
    """
3983 53c776b5 Iustin Pop
    if multimaster:
3984 53c776b5 Iustin Pop
      msg = "dual-master"
3985 53c776b5 Iustin Pop
    else:
3986 53c776b5 Iustin Pop
      msg = "single-master"
3987 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
3988 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
3989 53c776b5 Iustin Pop
                                           self.instance.disks,
3990 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
3991 53c776b5 Iustin Pop
    for node, nres in result.items():
3992 53c776b5 Iustin Pop
      msg = nres.RemoteFailMsg()
3993 53c776b5 Iustin Pop
      if msg:
3994 53c776b5 Iustin Pop
        raise errors.OpExecError("Cannot change disks config on node %s,"
3995 53c776b5 Iustin Pop
                                 " error: %s" % (node, msg))
3996 53c776b5 Iustin Pop
3997 53c776b5 Iustin Pop
  def _ExecCleanup(self):
3998 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
3999 53c776b5 Iustin Pop

4000 53c776b5 Iustin Pop
    The cleanup is done by:
4001 53c776b5 Iustin Pop
      - check that the instance is running only on one node
4002 53c776b5 Iustin Pop
        (and update the config if needed)
4003 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
4004 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
4005 53c776b5 Iustin Pop
      - disconnect from the network
4006 53c776b5 Iustin Pop
      - change disks into single-master mode
4007 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
4008 53c776b5 Iustin Pop

4009 53c776b5 Iustin Pop
    """
4010 53c776b5 Iustin Pop
    instance = self.instance
4011 53c776b5 Iustin Pop
    target_node = self.target_node
4012 53c776b5 Iustin Pop
    source_node = self.source_node
4013 53c776b5 Iustin Pop
4014 53c776b5 Iustin Pop
    # check running on only one node
4015 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
4016 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
4017 53c776b5 Iustin Pop
                     " a bad state)")
4018 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
4019 53c776b5 Iustin Pop
    for node, result in ins_l.items():
4020 53c776b5 Iustin Pop
      result.Raise()
4021 53c776b5 Iustin Pop
      if not isinstance(result.data, list):
4022 53c776b5 Iustin Pop
        raise errors.OpExecError("Can't contact node '%s'" % node)
4023 53c776b5 Iustin Pop
4024 53c776b5 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].data
4025 53c776b5 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].data
4026 53c776b5 Iustin Pop
4027 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
4028 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
4029 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
4030 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
4031 53c776b5 Iustin Pop
                               " and restart this operation.")
4032 53c776b5 Iustin Pop
4033 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
4034 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
4035 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
4036 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
4037 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
4038 53c776b5 Iustin Pop
4039 53c776b5 Iustin Pop
    if runningon_target:
4040 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
4041 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
4042 53c776b5 Iustin Pop
                       " updating config" % target_node)
4043 53c776b5 Iustin Pop
      instance.primary_node = target_node
4044 53c776b5 Iustin Pop
      self.cfg.Update(instance)
4045 53c776b5 Iustin Pop
      demoted_node = source_node
4046 53c776b5 Iustin Pop
    else:
4047 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
4048 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
4049 53c776b5 Iustin Pop
      demoted_node = target_node
4050 53c776b5 Iustin Pop
4051 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
4052 53c776b5 Iustin Pop
    try:
4053 53c776b5 Iustin Pop
      self._WaitUntilSync()
4054 53c776b5 Iustin Pop
    except errors.OpExecError:
4055 53c776b5 Iustin Pop
      # we ignore here errors, since if the device is standalone, it
4056 53c776b5 Iustin Pop
      # won't be able to sync
4057 53c776b5 Iustin Pop
      pass
4058 53c776b5 Iustin Pop
    self._GoStandalone()
4059 53c776b5 Iustin Pop
    self._GoReconnect(False)
4060 53c776b5 Iustin Pop
    self._WaitUntilSync()
4061 53c776b5 Iustin Pop
4062 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4063 53c776b5 Iustin Pop
4064 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
4065 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
4066 6906a9d8 Guido Trotter

4067 6906a9d8 Guido Trotter
    """
4068 6906a9d8 Guido Trotter
    target_node = self.target_node
4069 6906a9d8 Guido Trotter
    try:
4070 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
4071 6906a9d8 Guido Trotter
      self._GoStandalone()
4072 6906a9d8 Guido Trotter
      self._GoReconnect(False)
4073 6906a9d8 Guido Trotter
      self._WaitUntilSync()
4074 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
4075 6906a9d8 Guido Trotter
      self.LogWarning("Migration failed and I can't reconnect the"
4076 6906a9d8 Guido Trotter
                      " drives: error '%s'\n"
4077 6906a9d8 Guido Trotter
                      "Please look and recover the instance status" %
4078 6906a9d8 Guido Trotter
                      str(err))
4079 6906a9d8 Guido Trotter
4080 6906a9d8 Guido Trotter
  def _AbortMigration(self):
4081 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
4082 6906a9d8 Guido Trotter

4083 6906a9d8 Guido Trotter
    """
4084 6906a9d8 Guido Trotter
    instance = self.instance
4085 6906a9d8 Guido Trotter
    target_node = self.target_node
4086 6906a9d8 Guido Trotter
    migration_info = self.migration_info
4087 6906a9d8 Guido Trotter
4088 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
4089 6906a9d8 Guido Trotter
                                                    instance,
4090 6906a9d8 Guido Trotter
                                                    migration_info,
4091 6906a9d8 Guido Trotter
                                                    False)
4092 6906a9d8 Guido Trotter
    abort_msg = abort_result.RemoteFailMsg()
4093 6906a9d8 Guido Trotter
    if abort_msg:
4094 6906a9d8 Guido Trotter
      logging.error("Aborting migration failed on target node %s: %s" %
4095 6906a9d8 Guido Trotter
                    (target_node, abort_msg))
4096 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we stil have to try to revert the
4097 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
4098 6906a9d8 Guido Trotter
4099 53c776b5 Iustin Pop
  def _ExecMigration(self):
4100 53c776b5 Iustin Pop
    """Migrate an instance.
4101 53c776b5 Iustin Pop

4102 53c776b5 Iustin Pop
    The migrate is done by:
4103 53c776b5 Iustin Pop
      - change the disks into dual-master mode
4104 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
4105 53c776b5 Iustin Pop
      - migrate the instance
4106 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
4107 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
4108 53c776b5 Iustin Pop
      - change disks into single-master mode
4109 53c776b5 Iustin Pop

4110 53c776b5 Iustin Pop
    """
4111 53c776b5 Iustin Pop
    instance = self.instance
4112 53c776b5 Iustin Pop
    target_node = self.target_node
4113 53c776b5 Iustin Pop
    source_node = self.source_node
4114 53c776b5 Iustin Pop
4115 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
4116 53c776b5 Iustin Pop
    for dev in instance.disks:
4117 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
4118 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
4119 53c776b5 Iustin Pop
                                 " synchronized on target node,"
4120 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
4121 53c776b5 Iustin Pop
4122 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
4123 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
4124 6906a9d8 Guido Trotter
    msg = result.RemoteFailMsg()
4125 6906a9d8 Guido Trotter
    if msg:
4126 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
4127 0959c824 Iustin Pop
                 (source_node, msg))
4128 6906a9d8 Guido Trotter
      logging.error(log_err)
4129 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
4130 6906a9d8 Guido Trotter
4131 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
4132 6906a9d8 Guido Trotter
4133 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
4134 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
4135 53c776b5 Iustin Pop
    self._GoStandalone()
4136 53c776b5 Iustin Pop
    self._GoReconnect(True)
4137 53c776b5 Iustin Pop
    self._WaitUntilSync()
4138 53c776b5 Iustin Pop
4139 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
4140 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
4141 6906a9d8 Guido Trotter
                                           instance,
4142 6906a9d8 Guido Trotter
                                           migration_info,
4143 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
4144 6906a9d8 Guido Trotter
4145 6906a9d8 Guido Trotter
    msg = result.RemoteFailMsg()
4146 6906a9d8 Guido Trotter
    if msg:
4147 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
4148 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
4149 6906a9d8 Guido Trotter
      self._AbortMigration()
4150 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
4151 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
4152 6906a9d8 Guido Trotter
                               (instance.name, msg))
4153 6906a9d8 Guido Trotter
4154 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
4155 53c776b5 Iustin Pop
    time.sleep(10)
4156 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
4157 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
4158 53c776b5 Iustin Pop
                                            self.op.live)
4159 53c776b5 Iustin Pop
    msg = result.RemoteFailMsg()
4160 53c776b5 Iustin Pop
    if msg:
4161 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
4162 53c776b5 Iustin Pop
                    " disk status: %s", msg)
4163 6906a9d8 Guido Trotter
      self._AbortMigration()
4164 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
4165 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
4166 53c776b5 Iustin Pop
                               (instance.name, msg))
4167 53c776b5 Iustin Pop
    time.sleep(10)
4168 53c776b5 Iustin Pop
4169 53c776b5 Iustin Pop
    instance.primary_node = target_node
4170 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
4171 53c776b5 Iustin Pop
    self.cfg.Update(instance)
4172 53c776b5 Iustin Pop
4173 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
4174 6906a9d8 Guido Trotter
                                              instance,
4175 6906a9d8 Guido Trotter
                                              migration_info,
4176 6906a9d8 Guido Trotter
                                              True)
4177 6906a9d8 Guido Trotter
    msg = result.RemoteFailMsg()
4178 6906a9d8 Guido Trotter
    if msg:
4179 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
4180 6906a9d8 Guido Trotter
                    " %s" % msg)
4181 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
4182 6906a9d8 Guido Trotter
                               msg)
4183 6906a9d8 Guido Trotter
4184 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
4185 53c776b5 Iustin Pop
    self._WaitUntilSync()
4186 53c776b5 Iustin Pop
    self._GoStandalone()
4187 53c776b5 Iustin Pop
    self._GoReconnect(False)
4188 53c776b5 Iustin Pop
    self._WaitUntilSync()
4189 53c776b5 Iustin Pop
4190 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4191 53c776b5 Iustin Pop
4192 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
4193 53c776b5 Iustin Pop
    """Perform the migration.
4194 53c776b5 Iustin Pop

4195 53c776b5 Iustin Pop
    """
4196 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
4197 53c776b5 Iustin Pop
4198 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
4199 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
4200 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
4201 53c776b5 Iustin Pop
    self.nodes_ip = {
4202 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
4203 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
4204 53c776b5 Iustin Pop
      }
4205 53c776b5 Iustin Pop
    if self.op.cleanup:
4206 53c776b5 Iustin Pop
      return self._ExecCleanup()
4207 53c776b5 Iustin Pop
    else:
4208 53c776b5 Iustin Pop
      return self._ExecMigration()
4209 53c776b5 Iustin Pop
4210 53c776b5 Iustin Pop
4211 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
4212 428958aa Iustin Pop
                    info, force_open):
4213 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
4214 a8083063 Iustin Pop

4215 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
4216 a8083063 Iustin Pop
  all its children.
4217 a8083063 Iustin Pop

4218 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
4219 a8083063 Iustin Pop

4220 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
4221 428958aa Iustin Pop
  @param node: the node on which to create the device
4222 428958aa Iustin Pop
  @type instance: L{objects.Instance}
4223 428958aa Iustin Pop
  @param instance: the instance which owns the device
4224 428958aa Iustin Pop
  @type device: L{objects.Disk}
4225 428958aa Iustin Pop
  @param device: the device to create
4226 428958aa Iustin Pop
  @type force_create: boolean
4227 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
4228 428958aa Iustin Pop
      will be change to True whenever we find a device which has
4229 428958aa Iustin Pop
      CreateOnSecondary() attribute
4230 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4231 428958aa Iustin Pop
      (this will be represented as a LVM tag)
4232 428958aa Iustin Pop
  @type force_open: boolean
4233 428958aa Iustin Pop
  @param force_open: this parameter will be passes to the
4234 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4235 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
4236 428958aa Iustin Pop
      the child assembly and the device own Open() execution
4237 428958aa Iustin Pop

4238 a8083063 Iustin Pop
  """
4239 a8083063 Iustin Pop
  if device.CreateOnSecondary():
4240 428958aa Iustin Pop
    force_create = True
4241 796cab27 Iustin Pop
4242 a8083063 Iustin Pop
  if device.children:
4243 a8083063 Iustin Pop
    for child in device.children:
4244 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
4245 428958aa Iustin Pop
                      info, force_open)
4246 a8083063 Iustin Pop
4247 428958aa Iustin Pop
  if not force_create:
4248 796cab27 Iustin Pop
    return
4249 796cab27 Iustin Pop
4250 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
4251 de12473a Iustin Pop
4252 de12473a Iustin Pop
4253 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
4254 de12473a Iustin Pop
  """Create a single block device on a given node.
4255 de12473a Iustin Pop

4256 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
4257 de12473a Iustin Pop
  created in advance.
4258 de12473a Iustin Pop

4259 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
4260 de12473a Iustin Pop
  @param node: the node on which to create the device
4261 de12473a Iustin Pop
  @type instance: L{objects.Instance}
4262 de12473a Iustin Pop
  @param instance: the instance which owns the device
4263 de12473a Iustin Pop
  @type device: L{objects.Disk}
4264 de12473a Iustin Pop
  @param device: the device to create
4265 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4266 de12473a Iustin Pop
      (this will be represented as a LVM tag)
4267 de12473a Iustin Pop
  @type force_open: boolean
4268 de12473a Iustin Pop
  @param force_open: this parameter will be passes to the
4269 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4270 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
4271 de12473a Iustin Pop
      the child assembly and the device own Open() execution
4272 de12473a Iustin Pop

4273 de12473a Iustin Pop
  """
4274 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
4275 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
4276 428958aa Iustin Pop
                                       instance.name, force_open, info)
4277 7d81697f Iustin Pop
  msg = result.RemoteFailMsg()
4278 7d81697f Iustin Pop
  if msg:
4279 428958aa Iustin Pop
    raise errors.OpExecError("Can't create block device %s on"
4280 7d81697f Iustin Pop
                             " node %s for instance %s: %s" %
4281 7d81697f Iustin Pop
                             (device, node, instance.name, msg))
4282 a8083063 Iustin Pop
  if device.physical_id is None:
4283 0959c824 Iustin Pop
    device.physical_id = result.payload
4284 a8083063 Iustin Pop
4285 a8083063 Iustin Pop
4286 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
4287 923b1523 Iustin Pop
  """Generate a suitable LV name.
4288 923b1523 Iustin Pop

4289 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
4290 923b1523 Iustin Pop

4291 923b1523 Iustin Pop
  """
4292 923b1523 Iustin Pop
  results = []
4293 923b1523 Iustin Pop
  for val in exts:
4294 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
4295 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
4296 923b1523 Iustin Pop
  return results
4297 923b1523 Iustin Pop
4298 923b1523 Iustin Pop
4299 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  Allocates a port and a shared secret from the configuration, builds
  the data and metadata LVs from C{names} and wraps them in a DRBD8
  disk object.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  secret = lu.cfg.GenerateDRBDSecret()
  lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  # the metadata volume has a fixed size of 128
  lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port,
                                  p_minor, s_minor, secret),
                      children=[lv_data, lv_meta],
                      iv_name=iv_name)
4318 a1f445d3 Iustin Pop
4319 7c0d6283 Michael Hanselmann
4320 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  @param lu: the logical unit on whose behalf we execute
  @type template_name: string
  @param template_name: the disk template (one of the constants.DT_*
      values)
  @type instance_name: string
  @param instance_name: the name of the instance that will own the disks
  @param primary_node: the primary node of the instance
  @type secondary_nodes: list
  @param secondary_nodes: the secondary nodes; must be empty for all
      templates except DRBD8, which requires exactly one
  @type disk_info: list of dicts
  @param disk_info: one dict per disk, each with "size" and "mode" keys
  @param file_storage_dir: directory holding the disk files (only used
      for the file disk template)
  @param file_driver: driver for file-backed disks (only used for the
      file disk template)
  @type base_index: int
  @param base_index: index at which numbering of the generated disks
      starts (non-zero when adding disks to an existing instance)
  @rtype: list of L{objects.Disk}
  @return: the generated disks (empty for the diskless template)
  @raise errors.ProgrammerError: if the template name is invalid or the
      secondary node count does not match the template

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # two minors per disk: one on the primary, one on the secondary node
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    # each DRBD disk is backed by a data and a metadata logical volume
    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
4384 a8083063 Iustin Pop
4385 a8083063 Iustin Pop
4386 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
4387 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
4388 3ecf6786 Iustin Pop

4389 3ecf6786 Iustin Pop
  """
4390 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
4391 a0c3fea1 Michael Hanselmann
4392 a0c3fea1 Michael Hanselmann
4393 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  For file-based instances the storage directory is created (or
  verified) on the primary node first; every block device is then
  created on all of the instance's nodes, with creation/open forced
  only on the primary node.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @raise errors.OpExecError: if the file storage directory cannot be
      created; device-creation failures are raised from
      L{_CreateBlockDev}

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    if result.failed or not result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % pnode)

    if not result.data[0]:
      raise errors.OpExecError("Failed to create directory '%s'" %
                               file_storage_dir)

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in instance.all_nodes:
      # force creation and open only on the primary node
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
4429 a8083063 Iustin Pop
4430 a8083063 Iustin Pop
4431 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  success = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      fail_msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
      if not fail_msg:
        continue
      # best-effort: log the failure and keep going with the other devices
      lu.LogWarning("Could not remove block device %s on node %s,"
                    " continuing anyway: %s", device.iv_name, node, fail_msg)
      success = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                 file_storage_dir)
    if result.failed or not result.data:
      logging.error("Could not remove directory '%s'", file_storage_dir)
      success = False

  return success
4468 a8083063 Iustin Pop
4469 a8083063 Iustin Pop
4470 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  @param disk_template: the disk template of the instance
  @param disks: list of disk dicts, each with a "size" key
  @return: the required free size in the volume group, or None for
      templates which do not use the volume group
  @raise errors.ProgrammerError: for an unknown disk template

  """
  total_size = sum(d["size"] for d in disks)
  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: total_size,
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: total_size + 128 * len(disks),
    constants.DT_FILE: None,
  }

  try:
    return req_size_dict[disk_template]
  except KeyError:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)
4488 e2fe6369 Iustin Pop
4489 e2fe6369 Iustin Pop
4490 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
4491 74409b12 Iustin Pop
  """Hypervisor parameter validation.
4492 74409b12 Iustin Pop

4493 74409b12 Iustin Pop
  This function abstract the hypervisor parameter validation to be
4494 74409b12 Iustin Pop
  used in both instance create and instance modify.
4495 74409b12 Iustin Pop

4496 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
4497 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
4498 74409b12 Iustin Pop
  @type nodenames: list
4499 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
4500 74409b12 Iustin Pop
  @type hvname: string
4501 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
4502 74409b12 Iustin Pop
  @type hvparams: dict
4503 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
4504 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
4505 74409b12 Iustin Pop

4506 74409b12 Iustin Pop
  """
4507 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
4508 74409b12 Iustin Pop
                                                  hvname,
4509 74409b12 Iustin Pop
                                                  hvparams)
4510 74409b12 Iustin Pop
  for node in nodenames:
4511 781de953 Iustin Pop
    info = hvinfo[node]
4512 68c6f21c Iustin Pop
    if info.offline:
4513 68c6f21c Iustin Pop
      continue
4514 0959c824 Iustin Pop
    msg = info.RemoteFailMsg()
4515 0959c824 Iustin Pop
    if msg:
4516 d64769a8 Iustin Pop
      raise errors.OpPrereqError("Hypervisor parameter validation"
4517 d64769a8 Iustin Pop
                                 " failed on node %s: %s" % (node, msg))
4518 74409b12 Iustin Pop
4519 74409b12 Iustin Pop
4520 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
4521 a8083063 Iustin Pop
  """Create an instance.
4522 a8083063 Iustin Pop

4523 a8083063 Iustin Pop
  """
4524 a8083063 Iustin Pop
  HPATH = "instance-add"
4525 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4526 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
4527 08db7c5c Iustin Pop
              "mode", "start",
4528 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
4529 338e51e8 Iustin Pop
              "hvparams", "beparams"]
4530 7baf741d Guido Trotter
  REQ_BGL = False
4531 7baf741d Guido Trotter
4532 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Expand a possibly-short node name and verify it exists.

    @type node: string
    @param node: the node name to expand
    @rtype: string
    @return: the fully-expanded node name
    @raise errors.OpPrereqError: if the node is not known to the
        configuration

    """
    expanded_name = self.cfg.ExpandNodeName(node)
    if expanded_name is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return expanded_name
4540 7baf741d Guido Trotter
4541 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    This performs all the cheap, local-only validation of the opcode
    (creation mode, disk template, hypervisor and backend parameters,
    NIC and disk definitions, node vs. iallocator arguments) and
    computes the locks needed: the new instance name plus either an
    explicit node list or all nodes (when an iallocator is used, or an
    import without a source node).

    @raise errors.OpPrereqError: if any of the passed parameters is
        not valid

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp

    # fill and remember the beparams dict
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        # 'auto' means: use the IP of the instance's hostname
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
        else:
          # or validate/reserve the current one
          if self.cfg.IsMacInUse(mac):
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % mac)

      # bridge verification
      bridge = nic.get("bridge", None)
      if bridge is None:
        bridge = self.cfg.GetDefBridge()
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # any node may be chosen by the allocator, so lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        # the source node is unknown, so all nodes have to be locked
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          # relative source paths live under the export directory
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
4704 a8083063 Iustin Pop
4705 538475ca Iustin Pop
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    On success this sets C{self.op.pnode} (and C{self.op.snode} when
    two nodes are required) from the allocator's answer.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    allocator = IAllocator(self,
                           mode=constants.IALLOCATOR_MODE_ALLOC,
                           name=self.op.instance_name,
                           disk_template=self.op.disk_template,
                           tags=[],
                           os=self.op.os_type,
                           vcpus=self.be_full[constants.BE_VCPUS],
                           mem_size=self.be_full[constants.BE_MEMORY],
                           disks=self.disks,
                           nics=[nic.ToDict() for nic in self.nics],
                           hypervisor=self.op.hypervisor,
                           )

    allocator.Run(self.op.iallocator)

    if not allocator.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, allocator.info))
    if len(allocator.nodes) != allocator.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(allocator.nodes),
                                  allocator.required_nodes))
    self.op.pnode = allocator.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(allocator.nodes))
    if allocator.required_nodes == 2:
      self.op.snode = allocator.nodes[1]
4740 538475ca Iustin Pop
4741 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"ADD_MODE": self.op.mode}
    if self.op.mode == constants.INSTANCE_IMPORT:
      env.update({
        "SRC_NODE": self.op.src_node,
        "SRC_PATH": self.op.src_path,
        "SRC_IMAGES": self.src_images,
        })

    nic_info = [(nic.ip, nic.bridge, nic.mac) for nic in self.nics]
    disk_info = [(disk["size"], disk["mode"]) for disk in self.disks]
    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=nic_info,
      disk_template=self.op.disk_template,
      disks=disk_info,
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
    ))

    # hooks run on the master plus all nodes of the new instance
    node_list = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return env, node_list, node_list
4774 a8083063 Iustin Pop
4775 a8083063 Iustin Pop
4776 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Validates the creation request against the current cluster state:
    export data (for imports), IP conflicts, primary/secondary node
    status, free disk space, hypervisor parameters, OS availability,
    bridges and free memory.  Also generates final NIC MAC addresses
    and runs the instance allocator when one was requested.

    """
    # a cluster without a volume group can only host instances whose
    # disk template does not need LVM
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      # no source node given: search the export lists of all locked
      # nodes for the relative export path
      if src_node is None:
        exp_list = self.rpc.call_export_list(
          self.acquired_locks[locking.LEVEL_NODE])
        found = False
        for node in exp_list:
          if not exp_list[node].failed and src_path in exp_list[node].data:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                      src_path)

      # the export metadata is read from the source node
      _CheckNodeOnline(self, src_node)
      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise()
      if not result.data:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      export_info = result.data
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      # imports re-use the OS recorded in the export; collect the dump
      # image per disk (False for disks without a dump in the export)
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      # when re-importing under the original instance name, reuse the
      # exported MAC addresses for NICs still set to 'auto'
      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      self.secondaries.append(self.op.snode)

    # all nodes that will host this instance
    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise()
        info = info.data
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise()
    if not isinstance(result.data, objects.OS) or not result.data:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
                                 " exist on destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)
4954 49ce1563 Iustin Pop
4955 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the disks (rolling them back on failure), registers the
    instance in the configuration, waits for the disks to sync, runs
    the OS creation or import scripts, and finally starts the instance
    if requested.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # hypervisors listed in HTS_REQ_PORT get a cluster-wide unique
    # network port allocated
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    # the instance is created stopped (admin_up=False); it is marked up
    # further below, just before it is actually started
    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    try:
      _CreateDisks(self, iobj)
    except errors.OpExecError:
      # roll back any partially-created disks, then re-raise
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, iobj)
      finally:
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Unlock all the nodes
    if self.op.mode == constants.INSTANCE_IMPORT:
      # keep the source node locked: its export is still read below
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    # wait for the disks to sync (fully, or just check they are not
    # degraded, depending on wait_for_sync)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo disk creation and config registration
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
        msg = result.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Could not add os for instance %s"
                                   " on node %s: %s" %
                                   (instance, pnode_name, msg))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        import_result.Raise()
        # per-disk import failures are only warnings, not fatal errors
        for idx, result in enumerate(import_result.data):
          if not result:
            self.LogWarning("Could not import the image %s for instance"
                            " %s, disk %d, on node %s" %
                            (src_images[idx], instance, idx, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      # flip the admin state now that the instance is about to run
      iobj.admin_up = True
      self.cfg.Update(iobj)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not start instance: %s" % msg)
5090 a8083063 Iustin Pop
5091 a8083063 Iustin Pop
5092 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Compute the command needed to access an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that its
    primary node is online.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # verify that the instance is actually running on its primary node
    running = self.rpc.call_instance_list([pnode],
                                          [inst.hypervisor])[pnode]
    running.Raise()
    if inst.name not in running.data:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logging.debug("Connecting to console of %s on %s", inst.name, pnode)

    hv = hypervisor.GetHypervisor(inst.hypervisor)
    cluster_info = self.cfg.GetClusterInfo()
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    cmd = hv.GetShellCommandForConsole(inst,
                                       cluster_info.FillHV(inst),
                                       cluster_info.FillBE(inst))

    # build ssh cmdline
    return self.ssh.BuildCmd(pnode, "root", cmd, batch=True, tty=True)
5143 a8083063 Iustin Pop
5144 a8083063 Iustin Pop
5145 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
5146 a8083063 Iustin Pop
  """Replace the disks of an instance.
5147 a8083063 Iustin Pop

5148 a8083063 Iustin Pop
  """
5149 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
5150 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5151 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
5152 efd990e4 Guido Trotter
  REQ_BGL = False
5153 efd990e4 Guido Trotter
5154 7e9366f7 Iustin Pop
  def CheckArguments(self):
    """Normalize the optional opcode fields and validate their combination.

    Exactly one of remote_node/iallocator must be given when changing the
    secondary, and neither may be given in the other replace modes.

    """
    # make sure the optional attributes always exist on the opcode
    for attr in ("remote_node", "iallocator"):
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # check for valid parameter combination
    unset = [self.op.remote_node, self.op.iallocator].count(None)
    if self.op.mode == constants.REPLACE_DISK_CHG:
      if unset == 2:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given")
      if unset == 0:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
    elif unset != 2:
      # not replacing the secondary: neither option may be used
      raise errors.OpPrereqError("The iallocator and new node options can"
                                 " be used only when changing the"
                                 " secondary node")
5175 7e9366f7 Iustin Pop
5176 7e9366f7 Iustin Pop
  def ExpandNames(self):
    """Expand the instance name and declare the node locks we need."""
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      # the allocator may pick any node, so lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      return

    if self.op.remote_node is None:
      # lock only the instance's own nodes (filled in by DeclareLocks)
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
      return

    full_name = self.cfg.ExpandNodeName(self.op.remote_node)
    if full_name is None:
      raise errors.OpPrereqError("Node '%s' not known" %
                                 self.op.remote_node)
    self.op.remote_node = full_name
    # Warning: do not remove the locking of the new secondary here
    # unless DRBD8.AddChildren is changed to work in parallel;
    # currently it doesn't since parallel invocations of
    # FindUnusedMinor will conflict
    self.needed_locks[locking.LEVEL_NODE] = [full_name]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5196 efd990e4 Guido Trotter
5197 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
5198 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
5199 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
5200 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
5201 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
5202 efd990e4 Guido Trotter
      self._LockInstancesNodes()
5203 a8083063 Iustin Pop
5204 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    On success the chosen node name is stored in C{self.op.remote_node}
    and logged; C{self.sec_node} is used as the relocation source.

    @raise errors.OpPrereqError: if the allocator run fails or returns an
        unexpected number of nodes

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # the format string needs three arguments; the allocator name was
      # previously missing, which made this error path raise TypeError
      # instead of the intended OpPrereqError
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)
5226 b6e82a65 Iustin Pop
5227 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    # run the hooks on the master, the primary, and (when given) the
    # new secondary node
    node_list = [self.cfg.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return env, node_list, node_list
5246 a8083063 Iustin Pop
5247 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # replace-disks only makes sense for DRBD8-backed instances
    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # when an iallocator was requested, running it fills in
    # self.op.remote_node with the chosen new secondary
    if self.op.iallocator is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.")

    # set the attributes the Exec methods rely on: tgt_node is where new
    # storage gets created, oth_node is the untouched peer, and new_node
    # (CHG mode only) is the replacement secondary
    if self.op.mode == constants.REPLACE_DISK_PRI:
      n1 = self.tgt_node = instance.primary_node
      n2 = self.oth_node = self.sec_node
    elif self.op.mode == constants.REPLACE_DISK_SEC:
      n1 = self.tgt_node = self.sec_node
      n2 = self.oth_node = instance.primary_node
    elif self.op.mode == constants.REPLACE_DISK_CHG:
      n1 = self.new_node = remote_node
      n2 = self.oth_node = instance.primary_node
      self.tgt_node = self.sec_node
      _CheckNodeNotDrained(self, remote_node)
    else:
      raise errors.ProgrammerError("Unhandled disk replace mode")

    _CheckNodeOnline(self, n1)
    _CheckNodeOnline(self, n2)

    # an empty disk list means "replace all disks"
    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    # validate the requested disk indices; presumably FindDisk raises
    # for unknown indices — verify in objects.Instance.FindDisk
    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)
5308 a8083063 Iustin Pop
5309 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # iv_names maps a disk's iv_name to (drbd_dev, old_lvs, new_lvs) and
    # drives the detach/rename/attach and cleanup loops below
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node is where the LVs get replaced; oth_node is the peer whose
    # data must be consistent before we touch anything (set in CheckPrereq)
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    # only the disks listed in self.op.disks are touched, here and in
    # every following per-disk loop
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking disk/%d on %s" % (idx, node))
        cfg.SetDiskID(dev, node)
        result = self.rpc.call_blockdev_find(node, dev)
        msg = result.RemoteFailMsg()
        if not msg and not result.payload:
          msg = "disk not found"
        if msg:
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".disk%d_%s" % (idx, suf)
                  for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # the DRBD metadata LV has a fixed size of 128 (MiB, presumably —
      # verify against the DRBD8 meta-data sizing rules)
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if not result.RemoteFailMsg() and result.payload:
          # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # update the in-memory disk objects to match the on-node renames
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
      if result.failed or not result.data:
        # best-effort rollback: try to remove the freshly created LVs
        for new_lv in new_lvs:
          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
          if msg:
            warning("Can't rollback device %s: %s", dev, msg,
                    hint="cleanup manually the unused logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))
      # NOTE(review): payload[5] is treated as the degraded flag of the
      # blockdev_find result — confirm against the backend's tuple layout
      if result.payload[5]:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # removal failures are non-fatal: warn and leave the LV behind
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
        if msg:
          warning("Can't remove old LV: %s" % msg,
                  hint="manually remove unused LVs")
          continue
5500 a9e0c397 Iustin Pop
5501 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
5502 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
5503 a9e0c397 Iustin Pop

5504 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5505 a9e0c397 Iustin Pop
      - for all disks of the instance:
5506 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
5507 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
5508 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
5509 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
5510 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
5511 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
5512 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
5513 a9e0c397 Iustin Pop
          not network enabled
5514 a9e0c397 Iustin Pop
      - wait for sync across all devices
5515 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
5516 a9e0c397 Iustin Pop

5517 a9e0c397 Iustin Pop
    Failures are not very well handled.
5518 0834c866 Iustin Pop

5519 a9e0c397 Iustin Pop
    """
5520 0834c866 Iustin Pop
    steps_total = 6
5521 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5522 a9e0c397 Iustin Pop
    instance = self.instance
5523 a9e0c397 Iustin Pop
    iv_names = {}
5524 a9e0c397 Iustin Pop
    # start of work
5525 a9e0c397 Iustin Pop
    cfg = self.cfg
5526 a9e0c397 Iustin Pop
    old_node = self.tgt_node
5527 a9e0c397 Iustin Pop
    new_node = self.new_node
5528 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
5529 a2d59d8b Iustin Pop
    nodes_ip = {
5530 a2d59d8b Iustin Pop
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
5531 a2d59d8b Iustin Pop
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
5532 a2d59d8b Iustin Pop
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
5533 a2d59d8b Iustin Pop
      }
5534 0834c866 Iustin Pop
5535 0834c866 Iustin Pop
    # Step: check device activation
5536 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
5537 0834c866 Iustin Pop
    info("checking volume groups")
5538 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
5539 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
5540 0834c866 Iustin Pop
    for node in pri_node, new_node:
5541 781de953 Iustin Pop
      res = results[node]
5542 781de953 Iustin Pop
      if res.failed or not res.data or my_vg not in res.data:
5543 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5544 0834c866 Iustin Pop
                                 (my_vg, node))
5545 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5546 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5547 0834c866 Iustin Pop
        continue
5548 d418ebfb Iustin Pop
      info("checking disk/%d on %s" % (idx, pri_node))
5549 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5550 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5551 23829f6f Iustin Pop
      msg = result.RemoteFailMsg()
5552 23829f6f Iustin Pop
      if not msg and not result.payload:
5553 23829f6f Iustin Pop
        msg = "disk not found"
5554 23829f6f Iustin Pop
      if msg:
5555 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5556 23829f6f Iustin Pop
                                 (idx, pri_node, msg))
5557 0834c866 Iustin Pop
5558 0834c866 Iustin Pop
    # Step: check other node consistency
5559 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
5560 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5561 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5562 0834c866 Iustin Pop
        continue
5563 d418ebfb Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, pri_node))
5564 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
5565 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
5566 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
5567 0834c866 Iustin Pop
                                 pri_node)
5568 0834c866 Iustin Pop
5569 0834c866 Iustin Pop
    # Step: create new storage
5570 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
5571 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5572 d418ebfb Iustin Pop
      info("adding new local storage on %s for disk/%d" %
5573 d418ebfb Iustin Pop
           (new_node, idx))
5574 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
5575 a9e0c397 Iustin Pop
      for new_lv in dev.children:
5576 428958aa Iustin Pop
        _CreateBlockDev(self, new_node, instance, new_lv, True,
5577 428958aa Iustin Pop
                        _GetInstanceInfoText(instance), False)
5578 a9e0c397 Iustin Pop
5579 468b46f9 Iustin Pop
    # Step 4: dbrd minors and drbd setups changes
5580 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
5581 a1578d63 Iustin Pop
    # error and the success paths
5582 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
5583 a1578d63 Iustin Pop
                                   instance.name)
5584 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
5585 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
5586 d418ebfb Iustin Pop
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
5587 d418ebfb Iustin Pop
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
5588 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
5589 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
5590 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
5591 a2d59d8b Iustin Pop
      # with network, for the latter activation in step 4
5592 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5593 a2d59d8b Iustin Pop
      if pri_node == o_node1:
5594 a2d59d8b Iustin Pop
        p_minor = o_minor1
5595 ffa1c0dc Iustin Pop
      else:
5596 a2d59d8b Iustin Pop
        p_minor = o_minor2
5597 a2d59d8b Iustin Pop
5598 a2d59d8b Iustin Pop
      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
5599 a2d59d8b Iustin Pop
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
5600 a2d59d8b Iustin Pop
5601 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
5602 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5603 a2d59d8b Iustin Pop
                    new_net_id)
5604 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5605 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
5606 8a6c7011 Iustin Pop
                              children=dev.children,
5607 8a6c7011 Iustin Pop
                              size=dev.size)
5608 796cab27 Iustin Pop
      try:
5609 de12473a Iustin Pop
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
5610 de12473a Iustin Pop
                              _GetInstanceInfoText(instance), False)
5611 82759cb1 Iustin Pop
      except errors.GenericError:
5612 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5613 796cab27 Iustin Pop
        raise
5614 a9e0c397 Iustin Pop
5615 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5616 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
5617 d418ebfb Iustin Pop
      info("shutting down drbd for disk/%d on old node" % idx)
5618 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
5619 cacfd1fd Iustin Pop
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
5620 cacfd1fd Iustin Pop
      if msg:
5621 cacfd1fd Iustin Pop
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
5622 cacfd1fd Iustin Pop
                (idx, msg),
5623 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
5624 a9e0c397 Iustin Pop
5625 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
5626 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
5627 a2d59d8b Iustin Pop
                                               instance.disks)[pri_node]
5628 642445d9 Iustin Pop
5629 a2d59d8b Iustin Pop
    msg = result.RemoteFailMsg()
5630 a2d59d8b Iustin Pop
    if msg:
5631 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
5632 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
5633 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
5634 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
5635 642445d9 Iustin Pop
5636 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
5637 642445d9 Iustin Pop
    # the instance to point to the new secondary
5638 642445d9 Iustin Pop
    info("updating instance configuration")
5639 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
5640 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
5641 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5642 642445d9 Iustin Pop
    cfg.Update(instance)
5643 a9e0c397 Iustin Pop
5644 642445d9 Iustin Pop
    # and now perform the drbd attach
5645 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
5646 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
5647 a2d59d8b Iustin Pop
                                           instance.disks, instance.name,
5648 a2d59d8b Iustin Pop
                                           False)
5649 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
5650 a2d59d8b Iustin Pop
      msg = to_result.RemoteFailMsg()
5651 a2d59d8b Iustin Pop
      if msg:
5652 a2d59d8b Iustin Pop
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
5653 a2d59d8b Iustin Pop
                hint="please do a gnt-instance info to see the"
5654 a2d59d8b Iustin Pop
                " status of disks")
5655 a9e0c397 Iustin Pop
5656 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
5657 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
5658 a9e0c397 Iustin Pop
    # return value
5659 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
5660 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
5661 a9e0c397 Iustin Pop
5662 a9e0c397 Iustin Pop
    # so check manually all the devices
5663 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5664 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5665 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5666 23829f6f Iustin Pop
      msg = result.RemoteFailMsg()
5667 23829f6f Iustin Pop
      if not msg and not result.payload:
5668 23829f6f Iustin Pop
        msg = "disk not found"
5669 23829f6f Iustin Pop
      if msg:
5670 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
5671 23829f6f Iustin Pop
                                 (idx, msg))
5672 23829f6f Iustin Pop
      if result.payload[5]:
5673 d418ebfb Iustin Pop
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
5674 a9e0c397 Iustin Pop
5675 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
5676 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5677 d418ebfb Iustin Pop
      info("remove logical volumes for disk/%d" % idx)
5678 a9e0c397 Iustin Pop
      for lv in old_lvs:
5679 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
5680 e1bc0878 Iustin Pop
        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
5681 e1bc0878 Iustin Pop
        if msg:
5682 e1bc0878 Iustin Pop
          warning("Can't remove LV on old secondary: %s", msg,
5683 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
5684 a9e0c397 Iustin Pop
5685 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    inst = self.instance

    # An administratively-down instance has no active disks; activate
    # them for the duration of the replacement.
    if not inst.admin_up:
      _StartInstanceDisks(self, inst, True)

    # Pick the handler matching the requested replacement mode.
    if self.op.mode == constants.REPLACE_DISK_CHG:
      handler = self._ExecD8Secondary
    else:
      handler = self._ExecD8DiskOnly

    result = handler(feedback_fn)

    # Restore the "down" disk state if the instance is administratively down.
    if not inst.admin_up:
      _SafeShutdownInstanceDisks(self, inst)

    return result
5709 a9e0c397 Iustin Pop
5710 a8083063 Iustin Pop
5711 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  Grows a single disk of an instance by self.op.amount MiB on all
  nodes holding the instance's disks, then records the new size in
  the configuration.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  # 'disk' identifies the disk to grow (resolved via FindDisk below);
  # 'amount' is the growth in MiB
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    # Lock the instance; node locks are filled in by DeclareLocks once
    # the instance's node list is known (LOCKS_REPLACE recalculation).
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # Acquire locks on all nodes of the already-locked instance.
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    # hook nodes: the master node and the instance's primary node
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    template supports growing, and that each of its nodes has enough
    free space in the volume group; raises OpPrereqError otherwise.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)


    self.instance = instance

    # only LVM-backed disk templates can be grown
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)

    # verify free space in the volume group on every node of the instance
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo[node]
      if info.failed or not info.data:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      # a missing or non-integer 'vg_free' means the node could not report it
      vg_free = info.data.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    Grows the block device on each node of the instance (aborting on
    the first failure), records the new size in the configuration and
    optionally waits for the disks to resync.

    """
    instance = self.instance
    disk = self.disk
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Grow request failed to node %s: %s" %
                                 (node, msg))
    # all nodes succeeded: record the new size in the configuration
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
5804 8729e0d7 Iustin Pop
5805 8729e0d7 Iustin Pop
5806 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  Returns a dict mapping each wanted instance name to a dict of its
  configuration data and, unless self.op.static is set, live runtime
  information gathered via RPC from the instance's nodes.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # read-only LU: take all locks in shared mode
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      # an explicit list was given: expand the names and lock only those
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # empty list means "all instances"
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    # node locks are recalculated from the locked instances
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # lock the nodes of all instances selected at the instance level
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" was requested; use the instance locks we acquired
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing the device and, recursively, its
    children. 'pstatus'/'sstatus' carry the blockdev_find payload
    from the primary/secondary node, or None when the query is
    static or the queried node is offline.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
      if dev_pstatus.offline:
        # offline node: no live status available, but not an error
        dev_pstatus = None
      else:
        msg = dev_pstatus.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                   (instance.name, msg))
        dev_pstatus = dev_pstatus.payload
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in);
      # for DRBD the secondary is whichever logical_id node is not primary
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
      if dev_sstatus.offline:
        dev_sstatus = None
      else:
        msg = dev_sstatus.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                   (instance.name, msg))
        dev_sstatus = dev_sstatus.payload
    else:
      dev_sstatus = None

    if dev.children:
      # recurse into child devices, keeping the (possibly updated) snode
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        # live query: ask the primary node whether the instance is running
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise()
        remote_info = remote_info.data
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        # static query: no live state is reported
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        # parameters as set on the instance vs. with cluster defaults filled
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
5959 a8083063 Iustin Pop
5960 a8083063 Iustin Pop
5961 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
5962 a8083063 Iustin Pop
  """Modifies an instances's parameters.
5963 a8083063 Iustin Pop

5964 a8083063 Iustin Pop
  """
5965 a8083063 Iustin Pop
  HPATH = "instance-modify"
5966 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5967 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
5968 1a5c7281 Guido Trotter
  REQ_BGL = False
5969 1a5c7281 Guido Trotter
5970 24991749 Iustin Pop
  def CheckArguments(self):
5971 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
5972 24991749 Iustin Pop
      self.op.nics = []
5973 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
5974 24991749 Iustin Pop
      self.op.disks = []
5975 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
5976 24991749 Iustin Pop
      self.op.beparams = {}
5977 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
5978 24991749 Iustin Pop
      self.op.hvparams = {}
5979 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
5980 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
5981 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
5982 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
5983 24991749 Iustin Pop
5984 24991749 Iustin Pop
    # Disk validation
5985 24991749 Iustin Pop
    disk_addremove = 0
5986 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5987 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5988 24991749 Iustin Pop
        disk_addremove += 1
5989 24991749 Iustin Pop
        continue
5990 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
5991 24991749 Iustin Pop
        disk_addremove += 1
5992 24991749 Iustin Pop
      else:
5993 24991749 Iustin Pop
        if not isinstance(disk_op, int):
5994 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
5995 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
5996 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
5997 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
5998 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
5999 24991749 Iustin Pop
        size = disk_dict.get('size', None)
6000 24991749 Iustin Pop
        if size is None:
6001 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
6002 24991749 Iustin Pop
        try:
6003 24991749 Iustin Pop
          size = int(size)
6004 24991749 Iustin Pop
        except ValueError, err:
6005 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
6006 24991749 Iustin Pop
                                     str(err))
6007 24991749 Iustin Pop
        disk_dict['size'] = size
6008 24991749 Iustin Pop
      else:
6009 24991749 Iustin Pop
        # modification of disk
6010 24991749 Iustin Pop
        if 'size' in disk_dict:
6011 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
6012 24991749 Iustin Pop
                                     " grow-disk")
6013 24991749 Iustin Pop
6014 24991749 Iustin Pop
    if disk_addremove > 1:
6015 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
6016 24991749 Iustin Pop
                                 " supported at a time")
6017 24991749 Iustin Pop
6018 24991749 Iustin Pop
    # NIC validation
6019 24991749 Iustin Pop
    nic_addremove = 0
6020 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6021 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6022 24991749 Iustin Pop
        nic_addremove += 1
6023 24991749 Iustin Pop
        continue
6024 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
6025 24991749 Iustin Pop
        nic_addremove += 1
6026 24991749 Iustin Pop
      else:
6027 24991749 Iustin Pop
        if not isinstance(nic_op, int):
6028 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
6029 24991749 Iustin Pop
6030 24991749 Iustin Pop
      # nic_dict should be a dict
6031 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
6032 24991749 Iustin Pop
      if nic_ip is not None:
6033 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
6034 24991749 Iustin Pop
          nic_dict['ip'] = None
6035 24991749 Iustin Pop
        else:
6036 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
6037 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
6038 5c44da6a Guido Trotter
6039 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
6040 5c44da6a Guido Trotter
        nic_bridge = nic_dict.get('bridge', None)
6041 5c44da6a Guido Trotter
        if nic_bridge is None:
6042 5c44da6a Guido Trotter
          nic_dict['bridge'] = self.cfg.GetDefBridge()
6043 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
6044 5c44da6a Guido Trotter
        if nic_mac is None:
6045 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
6046 5c44da6a Guido Trotter
6047 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6048 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6049 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6050 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
6051 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
6052 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
6053 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
6054 5c44da6a Guido Trotter
                                     " modifying an existing nic")
6055 5c44da6a Guido Trotter
6056 24991749 Iustin Pop
    if nic_addremove > 1:
6057 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
6058 24991749 Iustin Pop
                                 " supported at a time")
6059 24991749 Iustin Pop
6060 1a5c7281 Guido Trotter
  def ExpandNames(self):
    # Lock the instance; node locks are computed in DeclareLocks once
    # the instance's node list is known (LOCKS_REPLACE recalculation).
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6064 74409b12 Iustin Pop
6065 74409b12 Iustin Pop
  def DeclareLocks(self, level):
    # Acquire locks on all nodes of the already-locked instance.
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
6068 a8083063 Iustin Pop
6069 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      nic_override = dict(self.op.nics)
      hook_nics = []
      for idx, nic in enumerate(self.instance.nics):
        # use the overridden value when this NIC is being modified,
        # otherwise the currently-configured one
        this_override = nic_override.get(idx, {})
        ip = this_override.get('ip', nic.ip)
        bridge = this_override.get('bridge', nic.bridge)
        mac = this_override.get('mac', nic.mac)
        hook_nics.append((ip, bridge, mac))
      if constants.DDM_ADD in nic_override:
        # a NIC is being added; CheckArguments has already filled in
        # 'bridge' and 'mac' for it, 'ip' remains optional
        new_nic = nic_override[constants.DDM_ADD]
        hook_nics.append((new_nic.get('ip', None), new_nic['bridge'],
                          new_nic['mac']))
      elif constants.DDM_REMOVE in nic_override:
        # removal always targets the last NIC
        del hook_nics[-1]
      args['nics'] = hook_nics

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl
6114 a8083063 Iustin Pop
6115 a8083063 Iustin Pop
  def CheckPrereq(self):
6116 a8083063 Iustin Pop
    """Check prerequisites.
6117 a8083063 Iustin Pop

6118 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
6119 a8083063 Iustin Pop

6120 a8083063 Iustin Pop
    """
6121 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
6122 a8083063 Iustin Pop
6123 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
6124 31a853d2 Iustin Pop
6125 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6126 1a5c7281 Guido Trotter
    assert self.instance is not None, \
6127 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6128 6b12959c Iustin Pop
    pnode = instance.primary_node
6129 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
6130 74409b12 Iustin Pop
6131 338e51e8 Iustin Pop
    # hvparams processing
6132 74409b12 Iustin Pop
    if self.op.hvparams:
6133 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
6134 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
6135 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
6136 74409b12 Iustin Pop
          try:
6137 74409b12 Iustin Pop
            del i_hvdict[key]
6138 74409b12 Iustin Pop
          except KeyError:
6139 74409b12 Iustin Pop
            pass
6140 74409b12 Iustin Pop
        else:
6141 74409b12 Iustin Pop
          i_hvdict[key] = val
6142 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
6143 a5728081 Guido Trotter
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
6144 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
6145 74409b12 Iustin Pop
                                i_hvdict)
6146 74409b12 Iustin Pop
      # local check
6147 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
6148 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
6149 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
6150 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
6151 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
6152 338e51e8 Iustin Pop
    else:
6153 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
6154 338e51e8 Iustin Pop
6155 338e51e8 Iustin Pop
    # beparams processing
6156 338e51e8 Iustin Pop
    if self.op.beparams:
6157 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
6158 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
6159 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
6160 338e51e8 Iustin Pop
          try:
6161 338e51e8 Iustin Pop
            del i_bedict[key]
6162 338e51e8 Iustin Pop
          except KeyError:
6163 338e51e8 Iustin Pop
            pass
6164 338e51e8 Iustin Pop
        else:
6165 338e51e8 Iustin Pop
          i_bedict[key] = val
6166 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
6167 a5728081 Guido Trotter
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
6168 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
6169 338e51e8 Iustin Pop
                                i_bedict)
6170 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
6171 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
6172 338e51e8 Iustin Pop
    else:
6173 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
6174 74409b12 Iustin Pop
6175 cfefe007 Guido Trotter
    self.warn = []
6176 647a5d80 Iustin Pop
6177 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
6178 647a5d80 Iustin Pop
      mem_check_list = [pnode]
6179 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6180 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
6181 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
6182 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
6183 72737a7f Iustin Pop
                                                  instance.hypervisor)
6184 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
6185 72737a7f Iustin Pop
                                         instance.hypervisor)
6186 781de953 Iustin Pop
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
6187 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
6188 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
6189 cfefe007 Guido Trotter
      else:
6190 781de953 Iustin Pop
        if not instance_info.failed and instance_info.data:
6191 ade0e8cd Guido Trotter
          current_mem = int(instance_info.data['memory'])
6192 cfefe007 Guido Trotter
        else:
6193 cfefe007 Guido Trotter
          # Assume instance not running
6194 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
6195 cfefe007 Guido Trotter
          # and we have no other way to check)
6196 cfefe007 Guido Trotter
          current_mem = 0
6197 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
6198 781de953 Iustin Pop
                    nodeinfo[pnode].data['memory_free'])
6199 cfefe007 Guido Trotter
        if miss_mem > 0:
6200 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
6201 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
6202 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
6203 cfefe007 Guido Trotter
6204 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6205 ea33068f Iustin Pop
        for node, nres in nodeinfo.iteritems():
6206 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
6207 ea33068f Iustin Pop
            continue
6208 781de953 Iustin Pop
          if nres.failed or not isinstance(nres.data, dict):
6209 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
6210 781de953 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
6211 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
6212 647a5d80 Iustin Pop
                             " secondary node %s" % node)
6213 5bc84f33 Alexander Schreiber
6214 24991749 Iustin Pop
    # NIC processing
6215 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6216 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6217 24991749 Iustin Pop
        if not instance.nics:
6218 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
6219 24991749 Iustin Pop
        continue
6220 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
6221 24991749 Iustin Pop
        # an existing nic
6222 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
6223 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
6224 24991749 Iustin Pop
                                     " are 0 to %d" %
6225 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
6226 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
6227 5c44da6a Guido Trotter
        nic_bridge = nic_dict['bridge']
6228 5c44da6a Guido Trotter
        if nic_bridge is None:
6229 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic bridge to None')
6230 24991749 Iustin Pop
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
6231 24991749 Iustin Pop
          msg = ("Bridge '%s' doesn't exist on one of"
6232 24991749 Iustin Pop
                 " the instance nodes" % nic_bridge)
6233 24991749 Iustin Pop
          if self.force:
6234 24991749 Iustin Pop
            self.warn.append(msg)
6235 24991749 Iustin Pop
          else:
6236 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
6237 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6238 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6239 5c44da6a Guido Trotter
        if nic_mac is None:
6240 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic mac to None')
6241 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6242 5c44da6a Guido Trotter
          # otherwise generate the mac
6243 5c44da6a Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC()
6244 5c44da6a Guido Trotter
        else:
6245 5c44da6a Guido Trotter
          # or validate/reserve the current one
6246 5c44da6a Guido Trotter
          if self.cfg.IsMacInUse(nic_mac):
6247 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
6248 5c44da6a Guido Trotter
                                       " in cluster" % nic_mac)
6249 24991749 Iustin Pop
6250 24991749 Iustin Pop
    # DISK processing
6251 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
6252 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
6253 24991749 Iustin Pop
                                 " diskless instances")
6254 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6255 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6256 24991749 Iustin Pop
        if len(instance.disks) == 1:
6257 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
6258 24991749 Iustin Pop
                                     " an instance")
6259 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
6260 24991749 Iustin Pop
        ins_l = ins_l[pnode]
6261 4cfb9426 Iustin Pop
        if ins_l.failed or not isinstance(ins_l.data, list):
6262 24991749 Iustin Pop
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
6263 4cfb9426 Iustin Pop
        if instance.name in ins_l.data:
6264 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
6265 24991749 Iustin Pop
                                     " disks.")
6266 24991749 Iustin Pop
6267 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
6268 24991749 Iustin Pop
          len(instance.nics) >= constants.MAX_DISKS):
6269 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
6270 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
6271 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
6272 24991749 Iustin Pop
        # an existing disk
6273 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
6274 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
6275 24991749 Iustin Pop
                                     " are 0 to %d" %
6276 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
6277 24991749 Iustin Pop
6278 a8083063 Iustin Pop
    return
6279 a8083063 Iustin Pop
6280 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    @type feedback_fn: callable
    @param feedback_fn: function used to report progress/warnings back
        to the caller (takes a single string argument)
    @rtype: list
    @return: list of (parameter-name, new-value) tuples describing each
        change that was applied

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk; CheckPrereq has already verified that the
        # instance is not running and has more than one disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        # drop the backing block devices on every node that holds a piece
        # of this disk
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
          if msg:
            # best-effort: a failed removal is reported but does not abort
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          # file-based disks are created in the same directory as the
          # instance's first disk
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          # forced creation only on the primary node
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            # creation failures are reported but do not abort the operation
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk: only the access mode can be modified
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set, by now
        mac = nic_dict['mac']
        bridge = nic_dict['bridge']
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
                              bridge=bridge)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,bridge=%s" %
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
      else:
        # change a given nic: update whichever of mac/ip/bridge were passed
        for key in 'mac', 'ip', 'bridge':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))

    # hvparams changes; the new dict was validated in CheckPrereq
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # persist the (in-place modified) instance object to the configuration
    self.cfg.Update(instance)

    return result
6382 a8083063 Iustin Pop
6383 a8083063 Iustin Pop
6384 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the node locks needed by this query.

    Node locks are taken shared; an explicit node list restricts the
    query, otherwise every node is queried.

    """
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      wanted = _GetWantedNodes(self, self.op.nodes)
    else:
      wanted = locking.ALL_SET
    self.needed_locks[locking.LEVEL_NODE] = wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    node_data = self.rpc.call_export_list(self.nodes)
    exports = {}
    for name in node_data:
      nres = node_data[name]
      # a failed node is reported as False instead of a list
      exports[name] = False if nres.failed else nres.data
    return exports
6424 a8083063 Iustin Pop
6425 a8083063 Iustin Pop
6426 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have do lock all nodes, as we don't know where
    # the previous export might be, and and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)
    _CheckNodeNotDrained(self, self.dst_node.name)

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    Snapshots every disk on the primary node, copies the snapshots to
    the target node, finalizes the export there and removes any older
    export of the same instance from the other nodes.

    @rtype: tuple
    @return: (fin_resu, dresults) where fin_resu is the boolean result
        of finalizing the export and dresults is a list of per-disk
        booleans (True if the disk was exported successfully)

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, src_node, msg))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    # per-disk results
    dresults = []
    try:
      for idx, disk in enumerate(instance.disks):
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if new_dev_name.failed or not new_dev_name.data:
          # a failed snapshot is recorded as False so that the export and
          # finalize steps below skip this disk
          self.LogWarning("Could not snapshot disk/%d on node %s",
                          idx, src_node)
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name.data),
                                 physical_id=(vgname, new_dev_name.data),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, but only if it
      # was configured to run and we shut it down ourselves
      if self.op.shutdown and instance.admin_up:
        result = self.rpc.call_instance_start(src_node, instance, None, None)
        msg = result.RemoteFailMsg()
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        if result.failed or not result.data:
          self.LogWarning("Could not export disk/%d from node %s to"
                          " node %s", idx, src_node, dst_node.name)
          dresults.append(False)
        else:
          dresults.append(True)
        # the snapshot is no longer needed once it has been copied over
        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
        if msg:
          self.LogWarning("Could not remove snapshot for disk/%d from node"
                          " %s: %s", idx, src_node, msg)
      else:
        dresults.append(False)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    fin_resu = True
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)
      fin_resu = False

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          # FIX: the old code tested the truthiness of the RpcResult object
          # itself ("if not self.rpc.call_export_remove(...)"), which is
          # always true, so the warning could never fire; check the result
          # fields as done everywhere else (e.g. LURemoveExport.Exec)
          result = self.rpc.call_export_remove(node, instance.name)
          if result.failed or not result.data:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)
    return fin_resu, dresults
6586 5c947f38 Iustin Pop
6587 5c947f38 Iustin Pop
6588 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the needed locks.

    """
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      node_result = exportlist[node]
      if node_result.failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if instance_name not in node_result.data:
        continue
      found = True
      result = self.rpc.call_export_remove(node, instance_name)
      if result.failed or not result.data:
        logging.error("Could not remove export for instance %s"
                      " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
6637 9ac99fda Guido Trotter
6638 9ac99fda Guido Trotter
6639 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    """Expand the target name and compute the needed locks.

    Node and instance names are expanded to their full cluster-wide
    form and the corresponding object is locked; cluster tags need no
    locks.

    """
    self.needed_locks = {}
    kind = self.op.kind
    if kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # resolve the object whose tags we will operate on
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
6676 5c947f38 Iustin Pop
6677 5c947f38 Iustin Pop
6678 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Return the target's tags as a list.

    """
    # self.target was resolved by TagsLU.CheckPrereq
    tags = self.target.GetTags()
    return list(tags)
6690 5c947f38 Iustin Pop
6691 5c947f38 Iustin Pop
6692 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
6693 73415719 Iustin Pop
  """Searches the tags for a given pattern.
6694 73415719 Iustin Pop

6695 73415719 Iustin Pop
  """
6696 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
6697 8646adce Guido Trotter
  REQ_BGL = False
6698 8646adce Guido Trotter
6699 8646adce Guido Trotter
  def ExpandNames(self):
6700 8646adce Guido Trotter
    self.needed_locks = {}
6701 73415719 Iustin Pop
6702 73415719 Iustin Pop
  def CheckPrereq(self):
6703 73415719 Iustin Pop
    """Check prerequisites.
6704 73415719 Iustin Pop

6705 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
6706 73415719 Iustin Pop

6707 73415719 Iustin Pop
    """
6708 73415719 Iustin Pop
    try:
6709 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
6710 73415719 Iustin Pop
    except re.error, err:
6711 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6712 73415719 Iustin Pop
                                 (self.op.pattern, err))
6713 73415719 Iustin Pop
6714 73415719 Iustin Pop
  def Exec(self, feedback_fn):
6715 73415719 Iustin Pop
    """Returns the tag list.
6716 73415719 Iustin Pop

6717 73415719 Iustin Pop
    """
6718 73415719 Iustin Pop
    cfg = self.cfg
6719 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
6720 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
6721 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6722 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
6723 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6724 73415719 Iustin Pop
    results = []
6725 73415719 Iustin Pop
    for path, target in tgts:
6726 73415719 Iustin Pop
      for tag in target.GetTags():
6727 73415719 Iustin Pop
        if self.re.search(tag):
6728 73415719 Iustin Pop
          results.append((path, tag))
6729 73415719 Iustin Pop
    return results
6730 73415719 Iustin Pop
6731 73415719 Iustin Pop
6732 f27302fa Iustin Pop
class LUAddTags(TagsLU):
6733 5c947f38 Iustin Pop
  """Sets a tag on a given object.
6734 5c947f38 Iustin Pop

6735 5c947f38 Iustin Pop
  """
6736 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6737 8646adce Guido Trotter
  REQ_BGL = False
6738 5c947f38 Iustin Pop
6739 5c947f38 Iustin Pop
  def CheckPrereq(self):
6740 5c947f38 Iustin Pop
    """Check prerequisites.
6741 5c947f38 Iustin Pop

6742 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
6743 5c947f38 Iustin Pop

6744 5c947f38 Iustin Pop
    """
6745 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6746 f27302fa Iustin Pop
    for tag in self.op.tags:
6747 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6748 5c947f38 Iustin Pop
6749 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6750 5c947f38 Iustin Pop
    """Sets the tag.
6751 5c947f38 Iustin Pop

6752 5c947f38 Iustin Pop
    """
6753 5c947f38 Iustin Pop
    try:
6754 f27302fa Iustin Pop
      for tag in self.op.tags:
6755 f27302fa Iustin Pop
        self.target.AddTag(tag)
6756 5c947f38 Iustin Pop
    except errors.TagError, err:
6757 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6758 5c947f38 Iustin Pop
    try:
6759 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6760 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6761 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6762 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6763 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6764 5c947f38 Iustin Pop
6765 5c947f38 Iustin Pop
6766 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for wanted_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(wanted_tag)
    # every tag we are asked to remove must currently be present
    missing = frozenset(self.op.tags) - self.target.GetTags()
    if missing:
      diff_names = sorted("'%s'" % tag for tag in missing)
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for old_tag in self.op.tags:
      self.target.RemoveTag(old_tag)
    # persist the modified object; a concurrent config change means the
    # whole operation must be retried by the caller
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
6803 06009e27 Iustin Pop
6804 0eed6e61 Guido Trotter
6805 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if not self.op.on_nodes:
      return
    # sleep on the requested nodes via rpc and check each per-node result
    result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
    if not result:
      raise errors.OpExecError("Complete failure from rpc call")
    for node_name, node_result in result.items():
      node_result.Raise()
      if not node_result.data:
        raise errors.OpExecError("Failure during rpc call to node %s,"
                                 " result: %s" % (node_name, node_result.data))
6850 d61df03e Iustin Pop
6851 d61df03e Iustin Pop
6852 d1c2dd75 Iustin Pop
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # input keys required in ALLOC mode
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  # input keys required in RELOC mode
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, lu, mode, name, **kwargs):
    """Initialize an allocator request.

    lu is the calling LogicalUnit (used for its cfg and rpc handles),
    mode is one of the constants.IALLOCATOR_MODE_* values, name is the
    instance name the request refers to, and kwargs must contain
    exactly the keys of _ALLO_KEYS or _RELO_KEYS, depending on mode.
    Raises errors.ProgrammerError on an unknown mode or a missing or
    unexpected input key; builds the input data at the end.

    """
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    # the set of required input keys depends on the request mode
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # every passed key must be expected, and every expected key passed
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.
    Fills self.in_data with the cluster, node and instance
    dictionaries expected by the external allocator script.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    # in ALLOC mode the hypervisor comes from the request; in RELOC
    # mode from the existing instance (no other mode can reach here,
    # __init__ already rejected it)
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor_name)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      # dynamic (rpc-based) data is only collected for usable nodes;
      # offline/drained nodes keep just the static entries
      if not (ninfo.offline or ninfo.drained):
        nresult.Raise()
        if not isinstance(nresult.data, dict):
          raise errors.OpExecError("Can't get data for node %s" % nname)
        remote_info = nresult.data
        # the rpc may return these values as strings; normalize to int
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          try:
            remote_info[attr] = int(remote_info[attr])
          except ValueError, err:
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" % (nname, attr, err))
        # compute memory used by primary instances
        # NOTE: this loop variable shadows the iinfo list defined above
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].data:
              # instance not reported by the hypervisor: not running
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
            # memory an instance should use but currently does not is
            # subtracted from the node's free memory, since it is
            # committed to that instance
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _AllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    # mirrored templates need a secondary node as well
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _IAllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # only net-mirrored instances can be relocated (the relocation
    # target replaces the single secondary node)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    Computes the generic cluster data and then adds the mode-specific
    request, serializing everything into self.in_text.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    The allocator script 'name' is executed on the master node via
    rpc (call_fn may override the rpc call used).  The script's stdout
    is stored in self.out_text and, when validate is True, parsed and
    checked via _ValidateResult.  Raises errors.OpExecError on any
    runner or allocator failure.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    # the runner is expected to return (rcode, stdout, stderr, fail)
    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # the three mandatory result keys are also mirrored as attributes
    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
7150 538475ca Iustin Pop
7151 538475ca Iustin Pop
7152 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU exercises the iallocator framework: depending on the opcode's
  direction it either builds and returns the request text that would be
  sent to an allocator, or actually runs the named allocator script.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def _CheckAllocModePrereq(self):
    """Verify the opcode parameters specific to the 'allocate' test mode."""
    for attr in ["name", "mem_size", "disks", "disk_template",
                 "os", "tags", "nics", "vcpus"]:
      if not hasattr(self.op, attr):
        raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                   attr)
    iname = self.cfg.ExpandInstanceName(self.op.name)
    if iname is not None:
      raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                 iname)
    if not isinstance(self.op.nics, list):
      raise errors.OpPrereqError("Invalid parameter 'nics'")
    for nic in self.op.nics:
      # each NIC must be a dict with at least mac/ip/bridge keys
      nic_bad = (not isinstance(nic, dict) or
                 "mac" not in nic or
                 "ip" not in nic or
                 "bridge" not in nic)
      if nic_bad:
        raise errors.OpPrereqError("Invalid contents of the"
                                   " 'nics' parameter")
    if not isinstance(self.op.disks, list):
      raise errors.OpPrereqError("Invalid parameter 'disks'")
    for disk in self.op.disks:
      # each disk must be a dict with an integer size and an r/w mode
      disk_bad = (not isinstance(disk, dict) or
                  "size" not in disk or
                  not isinstance(disk["size"], int) or
                  "mode" not in disk or
                  disk["mode"] not in ['r', 'w'])
      if disk_bad:
        raise errors.OpPrereqError("Invalid contents of the"
                                   " 'disks' parameter")
    if getattr(self.op, "hypervisor", None) is None:
      # default to the cluster's configured hypervisor
      self.op.hypervisor = self.cfg.GetHypervisorType()

  def _CheckRelocModePrereq(self):
    """Verify the opcode parameters specific to the 'relocate' test mode."""
    if not hasattr(self.op, "name"):
      raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
    fname = self.cfg.ExpandInstanceName(self.op.name)
    if fname is None:
      raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                 self.op.name)
    # canonicalize the instance name and remember its secondaries
    self.op.name = fname
    self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._CheckAllocModePrereq()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      self._CheckRelocModePrereq()
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      # the "out" direction actually invokes an external allocator
      if getattr(self.op, "allocator", None) is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      kwargs = {
        "mode": self.op.mode,
        "name": self.op.name,
        "mem_size": self.op.mem_size,
        "disks": self.op.disks,
        "disk_template": self.op.disk_template,
        "os": self.op.os,
        "tags": self.op.tags,
        "nics": self.op.nics,
        "vcpus": self.op.vcpus,
        "hypervisor": self.op.hypervisor,
        }
    else:
      kwargs = {
        "mode": self.op.mode,
        "name": self.op.name,
        "relocate_from": list(self.relocate_from),
        }
    ial = IAllocator(self, **kwargs)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      # "in" direction: only show the request that would be sent out
      return ial.in_text
    ial.Run(self.op.allocator, validate=False)
    return ial.out_text