Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 4c4b5058

History | View | Annotate | Download (252.9 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import time
29 a8083063 Iustin Pop
import re
30 a8083063 Iustin Pop
import platform
31 ffa1c0dc Iustin Pop
import logging
32 74409b12 Iustin Pop
import copy
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import ssh
35 a8083063 Iustin Pop
from ganeti import utils
36 a8083063 Iustin Pop
from ganeti import errors
37 a8083063 Iustin Pop
from ganeti import hypervisor
38 6048c986 Guido Trotter
from ganeti import locking
39 a8083063 Iustin Pop
from ganeti import constants
40 a8083063 Iustin Pop
from ganeti import objects
41 8d14b30d Iustin Pop
from ganeti import serializer
42 112f18a5 Iustin Pop
from ganeti import ssconf
43 d61df03e Iustin Pop
44 d61df03e Iustin Pop
45 a8083063 Iustin Pop
class LogicalUnit(object):
  """Base class for all Logical Units.

  Concrete subclasses are expected to:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dictionaries used to communicate locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # by default, no lock level is shared (value 0 == exclusive)
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging shortcuts
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103

    # every parameter declared in _OP_REQP must be present on the opcode
    for attr_name in self._OP_REQP:
      if getattr(op, attr_name, None) is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Return the (lazily created) SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity of the opcode arguments.

    This is a purely syntactic check of opcode parameters, done without
    any cluster-related knowledge. Although the same work could live in
    ExpandNames and/or CheckPrereq, keeping it separate is better
    because:

      - ExpandNames stays a purely lock-related function
      - CheckPrereq only runs after locks have been acquired (and
        possibly waited for)

    The method may modify self.op so that later phases don't need to
    worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    Called before the opcode starts executing; must bring all opcode
    parameters to their canonical form (e.g. a short node name must be
    fully expanded once this method completes), so that locking, hooks,
    logging, etc. work correctly.

    Implementations must also fill in self.needed_locks, a dict keyed by
    lock level with lists of needed lock names as values. Rules:

      - use an empty dict if you don't need any lock
      - omit a level entirely if no locks are needed at that level
      - never put anything for the BGL level
      - use locking.ALL_SET as the value to take all locks of a level

    To share locks (instead of acquiring them exclusively) at a given
    level, set a true value (usually 1) in self.share_locks for that
    level; by default nothing is shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # Implementing this method is only mandatory for concurrent LUs, so
    # that old LUs don't all have to be converted at once.
    if not self.REQ_BGL:
      raise NotImplementedError
    self.needed_locks = {} # Exclusive LUs don't need locks.

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    Most LUs can declare all their locking needs at ExpandNames time,
    but some need to compute locks at one level only after having
    acquired the ones at lower levels. This hook is invoked just before
    locking a particular level (and after the lower levels are held) to
    permit such calculations; it may modify self.needed_locks and does
    nothing by default.

    It is only invoked if self.needed_locks already has an entry for the
    given level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    Verifies that the prerequisites for executing this LU hold. It may
    talk to other nodes, but must stay idempotent - no cluster or
    system changes are allowed.

    Raises errors.OpPrereqError when something is not fulfilled; the
    return value is ignored.

    Should also bring any opcode parameters to canonical form if
    ExpandNames has not already done so.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    Performs the actual work. Failures that are somewhat dealt with in
    code, or expected, should be signalled by raising
    errors.OpExecError.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    Must return a three-element tuple: a dict with the environment used
    when running this LU's specific hook, the list of node names the
    hook runs on before execution, and the list of node names it runs
    on after execution.

    Keys in the dict must not carry the 'GANETI_' prefix - the hooks
    runner adds it (along with extra keys of its own). An LU with no
    environment should return an empty dict, never None.

    Likewise, nodes should be returned as an empty list, never None.

    If the LU class has HPATH set to None this method is never called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    Invoked after every hooks phase to tell the Logical Unit how its
    hooks went, letting it adjust its result accordingly. The default
    implementation passes the previous result through unchanged; LUs
    that want to exploit local cluster hook-scripts can override it.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs receive an instance name in self.op.instance_name and must
    expand it, then declare the expanded name for locking. This helper
    does exactly that, updating self.op.instance_name to the expanded
    name, and initializes needed_locks as a dict if that has not
    happened yet.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if full_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = full_name
    self.op.instance_name = full_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    Call this after locking one or more instances to lock their nodes;
    it fills self.needed_locks[locking.LEVEL_NODE] with every primary
    (and optionally secondary) node of the instances already present in
    self.needed_locks[locking.LEVEL_INSTANCE].

    Meant to be called from DeclareLocks; as a safety measure it only
    works when self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's
    nodes, or to just lock primaries or secondary nodes, if needed.

    If should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # collect the nodes of every instance whose lock we hold
    nodes = []
    for name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      inst = self.context.cfg.GetInstanceInfo(name)
      nodes.append(inst.primary_node)
      if not primary_only:
        nodes.extend(inst.secondary_nodes)

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    mode = self.recalculate_locks[locking.LEVEL_NODE]
    if mode == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = nodes
    elif mode == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Base class for Logical Units that trigger no hooks.

  Deriving from this class (instead of L{LogicalUnit} directly) saves
  each hook-less LU from resetting HPATH and HTYPE itself, reducing
  duplicate code.

  """
  HPATH = None
  HTYPE = None
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpProgrammerError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  expanded = []
  for short_name in nodes:
    full_name = lu.cfg.ExpandNodeName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % short_name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
def _GetWantedInstances(lu, instances):
366 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded instance names.
367 3312b702 Iustin Pop

368 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
369 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
370 e4376078 Iustin Pop
  @type instances: list
371 e4376078 Iustin Pop
  @param instances: list of instance names or None for all instances
372 e4376078 Iustin Pop
  @rtype: list
373 e4376078 Iustin Pop
  @return: the list of instances, sorted
374 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if the instances parameter is wrong type
375 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if any of the passed instances is not found
376 3312b702 Iustin Pop

377 3312b702 Iustin Pop
  """
378 3312b702 Iustin Pop
  if not isinstance(instances, list):
379 3312b702 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'instances'")
380 3312b702 Iustin Pop
381 3312b702 Iustin Pop
  if instances:
382 3312b702 Iustin Pop
    wanted = []
383 3312b702 Iustin Pop
384 3312b702 Iustin Pop
    for name in instances:
385 a7ba5e53 Iustin Pop
      instance = lu.cfg.ExpandInstanceName(name)
386 3312b702 Iustin Pop
      if instance is None:
387 3312b702 Iustin Pop
        raise errors.OpPrereqError("No such instance name '%s'" % name)
388 3312b702 Iustin Pop
      wanted.append(instance)
389 3312b702 Iustin Pop
390 3312b702 Iustin Pop
  else:
391 a7f5dc98 Iustin Pop
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
392 a7f5dc98 Iustin Pop
  return wanted
393 dcb93971 Michael Hanselmann
394 dcb93971 Michael Hanselmann
395 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  all_fields = utils.FieldSet()
  all_fields.Extend(static)
  all_fields.Extend(dynamic)

  unknown = all_fields.NonMatching(selected)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown))
def _CheckBooleanOpField(op, name):
415 a5961235 Iustin Pop
  """Validates boolean opcode parameters.
416 a5961235 Iustin Pop

417 a5961235 Iustin Pop
  This will ensure that an opcode parameter is either a boolean value,
418 a5961235 Iustin Pop
  or None (but that it always exists).
419 a5961235 Iustin Pop

420 a5961235 Iustin Pop
  """
421 a5961235 Iustin Pop
  val = getattr(op, name, None)
422 a5961235 Iustin Pop
  if not (val is None or isinstance(val, bool)):
423 a5961235 Iustin Pop
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
424 a5961235 Iustin Pop
                               (name, str(val)))
425 a5961235 Iustin Pop
  setattr(op, name, val)
426 a5961235 Iustin Pop
427 a5961235 Iustin Pop
428 a5961235 Iustin Pop
def _CheckNodeOnline(lu, node):
429 a5961235 Iustin Pop
  """Ensure that a given node is online.
430 a5961235 Iustin Pop

431 a5961235 Iustin Pop
  @param lu: the LU on behalf of which we make the check
432 a5961235 Iustin Pop
  @param node: the node to check
433 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is offline
434 a5961235 Iustin Pop

435 a5961235 Iustin Pop
  """
436 a5961235 Iustin Pop
  if lu.cfg.GetNodeInfo(node).offline:
437 a5961235 Iustin Pop
    raise errors.OpPrereqError("Can't use offline node %s" % node)
438 a5961235 Iustin Pop
439 a5961235 Iustin Pop
440 733a2b6a Iustin Pop
def _CheckNodeNotDrained(lu, node):
441 733a2b6a Iustin Pop
  """Ensure that a given node is not drained.
442 733a2b6a Iustin Pop

443 733a2b6a Iustin Pop
  @param lu: the LU on behalf of which we make the check
444 733a2b6a Iustin Pop
  @param node: the node to check
445 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is drained
446 733a2b6a Iustin Pop

447 733a2b6a Iustin Pop
  """
448 733a2b6a Iustin Pop
  if lu.cfg.GetNodeInfo(node).drained:
449 733a2b6a Iustin Pop
    raise errors.OpPrereqError("Can't use drained node %s" % node)
450 733a2b6a Iustin Pop
451 733a2b6a Iustin Pop
452 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
453 67fc3042 Iustin Pop
                          memory, vcpus, nics, disk_template, disks,
454 7c4d6c7b Michael Hanselmann
                          bep, hvp, hypervisor_name):
455 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
456 e4376078 Iustin Pop

457 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
458 e4376078 Iustin Pop

459 e4376078 Iustin Pop
  @type name: string
460 e4376078 Iustin Pop
  @param name: the name of the instance
461 e4376078 Iustin Pop
  @type primary_node: string
462 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
463 e4376078 Iustin Pop
  @type secondary_nodes: list
464 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
465 e4376078 Iustin Pop
  @type os_type: string
466 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
467 0d68c45d Iustin Pop
  @type status: boolean
468 0d68c45d Iustin Pop
  @param status: the should_run status of the instance
469 e4376078 Iustin Pop
  @type memory: string
470 e4376078 Iustin Pop
  @param memory: the memory size of the instance
471 e4376078 Iustin Pop
  @type vcpus: string
472 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
473 e4376078 Iustin Pop
  @type nics: list
474 e4376078 Iustin Pop
  @param nics: list of tuples (ip, bridge, mac) representing
475 e4376078 Iustin Pop
      the NICs the instance  has
476 2c2690c9 Iustin Pop
  @type disk_template: string
477 5bbd3f7f Michael Hanselmann
  @param disk_template: the disk template of the instance
478 2c2690c9 Iustin Pop
  @type disks: list
479 2c2690c9 Iustin Pop
  @param disks: the list of (size, mode) pairs
480 67fc3042 Iustin Pop
  @type bep: dict
481 67fc3042 Iustin Pop
  @param bep: the backend parameters for the instance
482 67fc3042 Iustin Pop
  @type hvp: dict
483 67fc3042 Iustin Pop
  @param hvp: the hypervisor parameters for the instance
484 7c4d6c7b Michael Hanselmann
  @type hypervisor_name: string
485 7c4d6c7b Michael Hanselmann
  @param hypervisor_name: the hypervisor for the instance
486 e4376078 Iustin Pop
  @rtype: dict
487 e4376078 Iustin Pop
  @return: the hook environment for this instance
488 ecb215b5 Michael Hanselmann

489 396e1b78 Michael Hanselmann
  """
490 0d68c45d Iustin Pop
  if status:
491 0d68c45d Iustin Pop
    str_status = "up"
492 0d68c45d Iustin Pop
  else:
493 0d68c45d Iustin Pop
    str_status = "down"
494 396e1b78 Michael Hanselmann
  env = {
495 0e137c28 Iustin Pop
    "OP_TARGET": name,
496 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
497 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
498 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
499 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
500 0d68c45d Iustin Pop
    "INSTANCE_STATUS": str_status,
501 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
502 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
503 2c2690c9 Iustin Pop
    "INSTANCE_DISK_TEMPLATE": disk_template,
504 7c4d6c7b Michael Hanselmann
    "INSTANCE_HYPERVISOR": hypervisor_name,
505 396e1b78 Michael Hanselmann
  }
506 396e1b78 Michael Hanselmann
507 396e1b78 Michael Hanselmann
  if nics:
508 396e1b78 Michael Hanselmann
    nic_count = len(nics)
509 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
510 396e1b78 Michael Hanselmann
      if ip is None:
511 396e1b78 Michael Hanselmann
        ip = ""
512 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
513 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
514 2c2690c9 Iustin Pop
      env["INSTANCE_NIC%d_MAC" % idx] = mac
515 396e1b78 Michael Hanselmann
  else:
516 396e1b78 Michael Hanselmann
    nic_count = 0
517 396e1b78 Michael Hanselmann
518 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
519 396e1b78 Michael Hanselmann
520 2c2690c9 Iustin Pop
  if disks:
521 2c2690c9 Iustin Pop
    disk_count = len(disks)
522 2c2690c9 Iustin Pop
    for idx, (size, mode) in enumerate(disks):
523 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_SIZE" % idx] = size
524 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_MODE" % idx] = mode
525 2c2690c9 Iustin Pop
  else:
526 2c2690c9 Iustin Pop
    disk_count = 0
527 2c2690c9 Iustin Pop
528 2c2690c9 Iustin Pop
  env["INSTANCE_DISK_COUNT"] = disk_count
529 2c2690c9 Iustin Pop
530 67fc3042 Iustin Pop
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
531 67fc3042 Iustin Pop
    for key, value in source.items():
532 67fc3042 Iustin Pop
      env["INSTANCE_%s_%s" % (kind, key)] = value
533 67fc3042 Iustin Pop
534 396e1b78 Michael Hanselmann
  return env
535 396e1b78 Michael Hanselmann
536 396e1b78 Michael Hanselmann
537 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  # complete be/hv parameter dicts for this instance
  be_full = cluster.FillBE(instance)
  hv_full = cluster.FillHV(instance)
  nic_list = [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics]
  disk_list = [(disk.size, disk.mode) for disk in instance.disks]
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': be_full[constants.BE_MEMORY],
    'vcpus': be_full[constants.BE_VCPUS],
    'nics': nic_list,
    'disk_template': instance.disk_template,
    'disks': disk_list,
    'bep': be_full,
    'hvp': hv_full,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
573 396e1b78 Michael Hanselmann
574 396e1b78 Michael Hanselmann
575 ec0292f1 Iustin Pop
def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  Promotes nodes into the master-candidate role as needed (via
  L{ConfigWriter.MaintainCandidatePool}) and re-adds the promoted nodes
  to the LU's context; warns when more candidates exist than desired.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute

  """
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    # use lazy %-args for consistency with the LogInfo call above,
    # instead of eagerly formatting the message
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)",
               mc_now, mc_max)
589 ec0292f1 Iustin Pop
590 ec0292f1 Iustin Pop
591 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose NIC bridges are verified
  @raise errors.OpPrereqError: if any required bridge is missing on the
      instance's primary node

  """
  # every NIC of the instance must have its bridge on the primary node
  required_bridges = [nic.bridge for nic in instance.nics]
  result = lu.rpc.call_bridges_exist(instance.primary_node,
                                     required_bridges)
  result.Raise()
  if not result.data:
    raise errors.OpPrereqError("One or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (required_bridges, instance.primary_node))
603 bf6929a2 Alexander Schreiber
604 bf6929a2 Alexander Schreiber
605 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    node_names = self.cfg.GetNodeList()
    # only the master node may remain in the configuration
    if len(node_names) != 1 or node_names[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(node_names) - 1))
    instance_names = self.cfg.GetInstanceList()
    if instance_names:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instance_names))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    stop_result = self.rpc.call_node_stop_master(master, False)
    stop_result.Raise()
    if not stop_result.data:
      raise errors.OpExecError("Could not disable the master role")
    # keep backups of the ssh keys of the Ganeti run-as user
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)
    return master
643 a8083063 Iustin Pop
644 a8083063 Iustin Pop
645 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
646 a8083063 Iustin Pop
  """Verifies the cluster status.
647 a8083063 Iustin Pop

648 a8083063 Iustin Pop
  """
649 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
650 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
651 e54c4c5e Guido Trotter
  _OP_REQP = ["skip_checks"]
652 d4b9d97f Guido Trotter
  REQ_BGL = False
653 d4b9d97f Guido Trotter
654 d4b9d97f Guido Trotter
  def ExpandNames(self):
    """Expand names: verification needs locks on all nodes and instances.

    """
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # mark every locking level as shared (value 1)
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
660 a8083063 Iustin Pop
661 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map, vg_name):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
    @rtype: boolean
    @return: True if the node failed at least one check (or could not be
        contacted at all), False otherwise

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    # the version entry must be a 2-element (protocol, release) sequence
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version: mismatch is only a warning, does not set 'bad'
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G (skipped when the cluster has no VG)
    if vg_name is not None:
      vglist = node_result.get(constants.NV_VGLIST, None)
      if not vglist:
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                        (node,))
        bad = True
      else:
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
          bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        # files not in master_files must exist on every node
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates (and the file is outdated)" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        # NOTE(review): this loop rebinds 'node', shadowing the name of the
        # node being verified; harmless today since later messages don't use
        # the outer value, but fragile if more reporting is added below
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    # hypervisor verify failures are reported but do NOT set 'bad' here;
    # NOTE(review): confirm whether that is intended
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    if vg_name is not None:
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
      if not isinstance(used_minors, (tuple, list)):
        # an unparseable status is reported but does not set 'bad'
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
                    str(used_minors))
      else:
        # every minor that must exist has to be in use on the node...
        for minor, (iname, must_exist) in drbd_map.items():
          if minor not in used_minors and must_exist:
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
                        " not active" % (minor, iname))
            bad = True
        # ...and every in-use minor has to be known to the configuration
        for minor in used_minors:
          if minor not in drbd_map:
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
                        minor)
            bad = True

    return bad
808 a8083063 Iustin Pop
809 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    @return: True if at least one problem was found, False otherwise

    """
    bad = False

    primary = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for nname, volumes in node_vol_should.items():
      if nname in n_offline:
        # ignore missing volumes on offline nodes
        continue
      present = node_vol_is.get(nname, [])
      for volume in volumes:
        if volume not in present:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, nname))
          bad = True

    if instanceconfig.admin_up:
      # an admin-up instance must be running on its (online) primary node
      running_on_primary = (primary in node_instance and
                            instance in node_instance[primary])
      if not running_on_primary and primary not in n_offline:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, primary))
        bad = True

    # the instance must not be running anywhere but on its primary
    for nname in node_instance:
      if nname != primary and instance in node_instance[nname]:
        feedback_fn("  - ERROR: instance %s should not run on node %s" %
                        (instance, nname))
        bad = True

    return bad
850 a8083063 Iustin Pop
851 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    @return: True if at least one orphan volume was found, False otherwise

    """
    bad = False

    for nname, volumes in node_vol_is.items():
      expected = node_vol_should.get(nname, [])
      for volume in volumes:
        if volume not in expected:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, nname))
          bad = True
    return bad
867 a8083063 Iustin Pop
868 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    @return: True if at least one unknown instance was found, False otherwise

    """
    bad = False
    for nname, running in node_instance.items():
      for iname in running:
        if iname not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (iname, nname))
          bad = True
    return bad
882 a8083063 Iustin Pop
883 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    @return: True if at least one node lacks failover memory, False otherwise

    """
    bad = False
    # hoisted out of the loops: the cluster info is invariant here, and the
    # original code fetched it once per instance
    cluster_info = self.cfg.GetClusterInfo()

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = cluster_info.FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
912 2b3b6ddd Guido Trotter
913 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    # every requested skip must be one of the optional checks
    if not self.skip_set.issubset(constants.VERIFY_OPTIONAL_CHECKS):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
923 a8083063 Iustin Pop
924 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just ran in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    node_list = self.cfg.GetNodeList()
    hooks_env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags()),
      }
    # expose the tags of every node as NODE_TAGS_<name>
    for node in self.cfg.GetAllNodesInfo().values():
      hooks_env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())

    # (env, pre-phase nodes, post-phase nodes) -- no pre-phase nodes here
    return hooks_env, [], node_list
939 d8fff41c Guido Trotter
940 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
941 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
942 a8083063 Iustin Pop

943 a8083063 Iustin Pop
    """
944 a8083063 Iustin Pop
    bad = False
945 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
946 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
947 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
948 a8083063 Iustin Pop
949 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
950 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
951 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
952 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
953 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
954 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
955 6d2e83d5 Iustin Pop
                        for iname in instancelist)
956 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
957 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
958 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
959 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
960 a8083063 Iustin Pop
    node_volume = {}
961 a8083063 Iustin Pop
    node_instance = {}
962 9c9c7d30 Guido Trotter
    node_info = {}
963 26b6af5e Guido Trotter
    instance_cfg = {}
964 a8083063 Iustin Pop
965 a8083063 Iustin Pop
    # FIXME: verify OS list
966 a8083063 Iustin Pop
    # do local checksums
967 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
968 112f18a5 Iustin Pop
969 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
970 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
971 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
972 112f18a5 Iustin Pop
    file_names.extend(master_files)
973 112f18a5 Iustin Pop
974 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
975 a8083063 Iustin Pop
976 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
977 a8083063 Iustin Pop
    node_verify_param = {
978 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
979 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
980 82e37788 Iustin Pop
                              if not node.offline],
981 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
982 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
983 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
984 82e37788 Iustin Pop
                                 if not node.offline],
985 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
986 25361b9a Iustin Pop
      constants.NV_VERSION: None,
987 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
988 a8083063 Iustin Pop
      }
989 cc9e1230 Guido Trotter
    if vg_name is not None:
990 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
991 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
992 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
993 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
994 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
995 a8083063 Iustin Pop
996 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
997 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
998 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
999 6d2e83d5 Iustin Pop
1000 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1001 112f18a5 Iustin Pop
      node = node_i.name
1002 25361b9a Iustin Pop
      nresult = all_nvinfo[node].data
1003 25361b9a Iustin Pop
1004 0a66c968 Iustin Pop
      if node_i.offline:
1005 0a66c968 Iustin Pop
        feedback_fn("* Skipping offline node %s" % (node,))
1006 0a66c968 Iustin Pop
        n_offline.append(node)
1007 0a66c968 Iustin Pop
        continue
1008 0a66c968 Iustin Pop
1009 112f18a5 Iustin Pop
      if node == master_node:
1010 25361b9a Iustin Pop
        ntype = "master"
1011 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1012 25361b9a Iustin Pop
        ntype = "master candidate"
1013 22f0f71d Iustin Pop
      elif node_i.drained:
1014 22f0f71d Iustin Pop
        ntype = "drained"
1015 22f0f71d Iustin Pop
        n_drained.append(node)
1016 112f18a5 Iustin Pop
      else:
1017 25361b9a Iustin Pop
        ntype = "regular"
1018 112f18a5 Iustin Pop
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1019 25361b9a Iustin Pop
1020 25361b9a Iustin Pop
      if all_nvinfo[node].failed or not isinstance(nresult, dict):
1021 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
1022 25361b9a Iustin Pop
        bad = True
1023 25361b9a Iustin Pop
        continue
1024 25361b9a Iustin Pop
1025 6d2e83d5 Iustin Pop
      node_drbd = {}
1026 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1027 c614e5fb Iustin Pop
        if instance not in instanceinfo:
1028 c614e5fb Iustin Pop
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
1029 c614e5fb Iustin Pop
                      instance)
1030 c614e5fb Iustin Pop
          # ghost instance should not be running, but otherwise we
1031 c614e5fb Iustin Pop
          # don't give double warnings (both ghost instance and
1032 c614e5fb Iustin Pop
          # unallocated minor in use)
1033 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1034 c614e5fb Iustin Pop
        else:
1035 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1036 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
1037 112f18a5 Iustin Pop
      result = self._VerifyNode(node_i, file_names, local_checksums,
1038 6d2e83d5 Iustin Pop
                                nresult, feedback_fn, master_files,
1039 cc9e1230 Guido Trotter
                                node_drbd, vg_name)
1040 a8083063 Iustin Pop
      bad = bad or result
1041 a8083063 Iustin Pop
1042 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1043 cc9e1230 Guido Trotter
      if vg_name is None:
1044 cc9e1230 Guido Trotter
        node_volume[node] = {}
1045 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1046 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
1047 26f15862 Iustin Pop
                    (node, utils.SafeEncode(lvdata)))
1048 b63ed789 Iustin Pop
        bad = True
1049 b63ed789 Iustin Pop
        node_volume[node] = {}
1050 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1051 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1052 a8083063 Iustin Pop
        bad = True
1053 a8083063 Iustin Pop
        continue
1054 b63ed789 Iustin Pop
      else:
1055 25361b9a Iustin Pop
        node_volume[node] = lvdata
1056 a8083063 Iustin Pop
1057 a8083063 Iustin Pop
      # node_instance
1058 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1059 25361b9a Iustin Pop
      if not isinstance(idata, list):
1060 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1061 25361b9a Iustin Pop
                    (node,))
1062 a8083063 Iustin Pop
        bad = True
1063 a8083063 Iustin Pop
        continue
1064 a8083063 Iustin Pop
1065 25361b9a Iustin Pop
      node_instance[node] = idata
1066 a8083063 Iustin Pop
1067 9c9c7d30 Guido Trotter
      # node_info
1068 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1069 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
1070 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1071 9c9c7d30 Guido Trotter
        bad = True
1072 9c9c7d30 Guido Trotter
        continue
1073 9c9c7d30 Guido Trotter
1074 9c9c7d30 Guido Trotter
      try:
1075 9c9c7d30 Guido Trotter
        node_info[node] = {
1076 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1077 93e4c50b Guido Trotter
          "pinst": [],
1078 93e4c50b Guido Trotter
          "sinst": [],
1079 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1080 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1081 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1082 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1083 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1084 36e7da50 Guido Trotter
          # secondary.
1085 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1086 9c9c7d30 Guido Trotter
        }
1087 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1088 cc9e1230 Guido Trotter
        if vg_name is not None:
1089 9a198532 Iustin Pop
          if (constants.NV_VGLIST not in nresult or
1090 9a198532 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST]):
1091 9a198532 Iustin Pop
            feedback_fn("  - ERROR: node %s didn't return data for the"
1092 9a198532 Iustin Pop
                        " volume group '%s' - it is either missing or broken" %
1093 9a198532 Iustin Pop
                        (node, vg_name))
1094 9a198532 Iustin Pop
            bad = True
1095 9a198532 Iustin Pop
            continue
1096 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1097 9a198532 Iustin Pop
      except (ValueError, KeyError):
1098 9a198532 Iustin Pop
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
1099 9a198532 Iustin Pop
                    " from node %s" % (node,))
1100 9c9c7d30 Guido Trotter
        bad = True
1101 9c9c7d30 Guido Trotter
        continue
1102 9c9c7d30 Guido Trotter
1103 a8083063 Iustin Pop
    node_vol_should = {}
1104 a8083063 Iustin Pop
1105 a8083063 Iustin Pop
    for instance in instancelist:
1106 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1107 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1108 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1109 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1110 c5705f58 Guido Trotter
      bad = bad or result
1111 832261fd Iustin Pop
      inst_nodes_offline = []
1112 a8083063 Iustin Pop
1113 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1114 a8083063 Iustin Pop
1115 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1116 26b6af5e Guido Trotter
1117 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1118 93e4c50b Guido Trotter
      if pnode in node_info:
1119 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1120 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1121 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1122 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1123 93e4c50b Guido Trotter
        bad = True
1124 93e4c50b Guido Trotter
1125 832261fd Iustin Pop
      if pnode in n_offline:
1126 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1127 832261fd Iustin Pop
1128 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1129 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1130 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1131 93e4c50b Guido Trotter
      # supported either.
1132 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1133 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1134 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1135 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1136 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1137 93e4c50b Guido Trotter
                    % instance)
1138 93e4c50b Guido Trotter
1139 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1140 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1141 3924700f Iustin Pop
1142 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1143 93e4c50b Guido Trotter
        if snode in node_info:
1144 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1145 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1146 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1147 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1148 0a66c968 Iustin Pop
        elif snode not in n_offline:
1149 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1150 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1151 832261fd Iustin Pop
          bad = True
1152 832261fd Iustin Pop
        if snode in n_offline:
1153 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1154 832261fd Iustin Pop
1155 832261fd Iustin Pop
      if inst_nodes_offline:
1156 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1157 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1158 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1159 832261fd Iustin Pop
        bad = True
1160 93e4c50b Guido Trotter
1161 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1162 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1163 a8083063 Iustin Pop
                                       feedback_fn)
1164 a8083063 Iustin Pop
    bad = bad or result
1165 a8083063 Iustin Pop
1166 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1167 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1168 a8083063 Iustin Pop
                                         feedback_fn)
1169 a8083063 Iustin Pop
    bad = bad or result
1170 a8083063 Iustin Pop
1171 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1172 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1173 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1174 e54c4c5e Guido Trotter
      bad = bad or result
1175 2b3b6ddd Guido Trotter
1176 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1177 2b3b6ddd Guido Trotter
    if i_non_redundant:
1178 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1179 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1180 2b3b6ddd Guido Trotter
1181 3924700f Iustin Pop
    if i_non_a_balanced:
1182 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1183 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1184 3924700f Iustin Pop
1185 0a66c968 Iustin Pop
    if n_offline:
1186 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1187 0a66c968 Iustin Pop
1188 22f0f71d Iustin Pop
    if n_drained:
1189 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1190 22f0f71d Iustin Pop
1191 34290825 Michael Hanselmann
    return not bad
1192 a8083063 Iustin Pop
1193 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          # a failed RPC, a False payload or a non-list payload all mean
          # that we got no usable hook results back from this node
          if res.failed or res.data is False or not isinstance(res.data, list):
            if res.offline:
              # no need to warn or set fail return value
              continue
            feedback_fn("    Communication failure in hooks execution")
            lu_result = 1
            continue
          for script, hkr, output in res.data:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

    # Return the (possibly updated) result unconditionally; previously a
    # non-POST phase fell off the end and implicitly returned None, which
    # contradicts the documented contract of returning the new Exec result.
    return lu_result
1241 d8fff41c Guido Trotter
1242 a8083063 Iustin Pop
1243 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # we need to look at every node and every instance, but shared
    # (read) locks are enough for that
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    # the returned value is a tuple of four "sub-results" which are
    # also kept under individual names for easier filling-in
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]

    # map of (node, volume) -> owning instance, for every LV we expect
    # to find on the cluster
    vol_owner = {}
    for inst in instances:
      # only running, network-mirrored instances are interesting here
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      lvs_by_node = {}
      inst.MapLVsByNode(lvs_by_node)
      for node, vols in lvs_by_node.iteritems():
        for vol in vols:
          vol_owner[(node, vol)] = inst

    if not vol_owner:
      # nothing to check, return the empty results
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      lvs = node_lvs[node]
      if lvs.failed:
        # offline nodes are expected to fail, no warning needed
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        # a string payload denotes an LVM-level error on the node
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        # mark this LV as seen; an offline LV flags its instance as broken
        inst = vol_owner.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # whatever is left in vol_owner was never reported back by any
    # node, i.e. these are the missing LVs, grouped per instance
    for key, inst in vol_owner.iteritems():
      res_missing.setdefault(inst.name, []).append(key)

    return result
1326 2c95a8d4 Iustin Pop
1327 2c95a8d4 Iustin Pop
1328 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  _OP_REQP = ["instances"]
  REQ_BGL = False

  def ExpandNames(self):
    # the "instances" argument must always be a list (possibly empty)
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      # expand and validate each requested instance name
      expanded = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        expanded.append(full_name)
      self.wanted_names = expanded
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      # node locks will be computed from the instance locks
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      # no explicit list given: operate on all instances
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def DeclareLocks(self, level):
    # with an explicit instance list, lock only the primary nodes
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" mode: use whatever locks we actually acquired
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = map(self.cfg.GetInstanceInfo, self.wanted_names)

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    # only DRBD8 devices need (and support) this fixup
    if disk.dev_type != constants.LD_DRBD8:
      return False

    assert disk.children, "Empty children for DRBD8?"
    fchild = disk.children[0]
    mismatch = fchild.size < disk.size
    if mismatch:
      self.LogInfo("Child disk has size %d, parent %d, fixing",
                   fchild.size, disk.size)
      fchild.size = disk.size

    # and we recurse on this child only, not on the metadev
    return self._EnsureChildSizes(fchild) or mismatch

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      # group the (instance, index, disk) triples by primary node
      node_disks = per_node_disks.setdefault(instance.primary_node, [])
      for idx, disk in enumerate(instance.disks):
        node_disks.append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      # work on copies so that SetDiskID doesn't alter the config objects
      newl = [triple[2].Copy() for triple in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsizes(node, newl)
      if result.failed:
        self.LogWarning("Failure in blockdev_getsizes call to node"
                        " %s, ignoring", node)
        continue
      if len(result.data) != len(dskl):
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.data):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
        # the node reports bytes, the config stores mebibytes
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance)
          changed.append((instance.name, idx, disk.size))
    return changed
1448 60975797 Iustin Pop
1449 60975797 Iustin Pop
1450 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    # the rename hooks run only on the master node
    master_node = self.cfg.GetMasterNode()
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    return env, [master_node], [master_node]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    # reject a rename that would change nothing
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    # a new master IP must not be already live on the network
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    new_name = self.op.name
    new_ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = new_name
      cluster.master_ip = new_ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      # distribute it to all nodes but the master itself
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)

    finally:
      # always try to restart the master role, whatever happened above
      result = self.rpc.call_node_start_master(master, False, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")
1528 07bd8a51 Iustin Pop
1529 07bd8a51 Iustin Pop
1530 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  # a device is LVM-based if any of its children is, or if it is an
  # LV itself; guard against a missing/empty children attribute
  return (any(_RecursiveCheckIfLVMBased(child)
              for child in (disk.children or [])) or
          disk.dev_type == constants.LD_LV)
1544 8084f9f6 Manuel Franceschini
1545 8084f9f6 Manuel Franceschini
1546 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  All opcode parameters are optional; each one that is present is
  validated in CheckArguments/CheckPrereq and applied in Exec.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  # no opcode slot is mandatory: every parameter of this LU is optional
  _OP_REQP = []
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    Normalizes the optional candidate_pool_size parameter to an int and
    verifies it is at least 1; raises OpPrereqError otherwise.

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except (ValueError, TypeError), err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    # shared lock: the nodes themselves are only inspected, not changed
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    Hooks run only on the master node, with the (possibly new) volume
    group name exported in the environment.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # vg_name == "" (not None) means "disable lvm storage"; this is only
    # allowed when no instance uses lvm-based disks anymore
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    # keep a handle on the cluster object: Exec mutates it in place and
    # saves it at the end
    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      if not self.hv_list:
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                                   " least one member")
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
      if invalid_hvs:
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                                   " entries: %s" % invalid_hvs)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        # an empty string is normalized to None (lvm storage disabled)
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self)

    self.cfg.Update(self.cluster)
1688 8084f9f6 Manuel Franceschini
1689 8084f9f6 Manuel Franceschini
1690 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # a shared lock on all nodes is enough, as nothing on the nodes
    # themselves is modified by this LU
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    There are no prerequisites for this LU.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    The only action taken is re-saving the current cluster object via
    the configuration layer.

    """
    cluster_info = self.cfg.GetClusterInfo()
    self.cfg.Update(cluster_info)
1716 afee0879 Iustin Pop
1717 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  @param lu: the LU on whose behalf we poll (provides rpc, cfg, proc)
  @param instance: the instance whose disks are polled (queried on its
      primary node)
  @type oneshot: boolean
  @param oneshot: if True, stop after the first successful status poll
      instead of waiting for full synchronization
  @param unlock: unused in this function -- TODO: confirm and remove
  @rtype: boolean
  @return: True if no disk was (cumulatively) degraded when we stopped

  """
  if not instance.disks:
    # no disks, nothing to wait for
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  # all mirror status queries go to the primary node
  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      # RPC-level failure: retry up to 10 times with 6-second pauses
      # before giving up on the node
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # a degraded device that reports no resync progress counts
      # towards the cumulative degraded state
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    # sleep roughly as long as the largest estimated resync time,
    # capped at one minute per iteration
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1784 a8083063 Iustin Pop
1785 a8083063 Iustin Pop
1786 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)
  # pick which field of the blockdev_find payload to test: position 5
  # (is_degraded) by default, position 6 (ldisk) when requested
  if ldisk:
    status_idx = 6
  else:
    status_idx = 5

  healthy = True
  if on_primary or dev.AssembleOnSecondary():
    find_result = lu.rpc.call_blockdev_find(node, dev)
    msg = find_result.RemoteFailMsg()
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      healthy = False
    elif not find_result.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      healthy = False
    else:
      healthy = healthy and (not find_result.payload[status_idx])

  # recurse into the children; short-circuiting on 'healthy' skips
  # further RPC calls once a problem has been found
  for child in (dev.children or []):
    healthy = healthy and _CheckDiskConsistency(lu, child, node, on_primary)

  return healthy
1817 a8083063 Iustin Pop
1818 a8083063 Iustin Pop
1819 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  Queries the OS definitions on all online nodes and presents them
  per-OS, per-node.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    No prerequisites beyond the field check done in ExpandNames.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].failed]
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in good_nodes:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    # valid_nodes is never mutated here, so the previous defensive
    # list-comprehension copy of GetOnlineNodeList() was redundant
    valid_nodes = self.cfg.GetOnlineNodeList()
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    # identity check against the False singleton (PEP 8); a normal
    # per-node result map never compares equal to False
    if node_data is False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # valid iff every node's entry list is non-empty and its
          # first entry is truthy
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1910 a8083063 Iustin Pop
1911 a8083063 Iustin Pop
1912 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # the node being removed does not get the hooks
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # fixed: was an old-style "raise Class, value" statement,
      # inconsistent with every other raise in this module
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    # store the canonical (expanded) node name and the node object for Exec
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)
1980 a8083063 Iustin Pop
1981 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  Static fields come from the configuration; dynamic fields require a
  live node_info RPC to the nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields that need a live query on the node
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  # fields answered purely from the configuration
  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    "drained",
    "role",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed when dynamic fields are requested AND the
    # caller asked for it
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted


  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      # with locking, the set of nodes is exactly what we locked
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      # without locking, explicitly-named nodes may have disappeared
      # between ExpandNames and now
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          # unwrap the RPC result to its data payload
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          # failed/empty RPC answer: dynamic fields will read as None
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    # only walk the instance list if an instance-related field was asked for
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      inst_data = self.cfg.GetAllInstancesInfo()

      for instance_name, inst in inst_data.items():
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif field == "drained":
          val = node.drained
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        elif field == "role":
          # single-letter role: Master, Candidate, Drained, Offline, Regular
          if node.name == master_node:
            val = "M"
          elif node.master_candidate:
            val = "C"
          elif node.drained:
            val = "D"
          elif node.offline:
            val = "O"
          else:
            val = "R"
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
2148 a8083063 Iustin Pop
2149 a8083063 Iustin Pop
2150 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # volumes are read-only data, so shared node locks suffice
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # the node list is whatever we managed to lock in ExpandNames
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]

    # map each instance to its node -> LVs mapping, for the "instance" field
    lv_by_node = dict((inst, inst.MapLVsByNode()) for inst in instances)

    output = []
    for node in nodenames:
      if node not in volumes:
        continue
      nresult = volumes[node]
      # skip nodes whose RPC failed or which returned no volume data
      if nresult.failed or not nresult.data:
        continue

      for vol in sorted(nresult.data, key=lambda item: item['dev']):
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the (first) instance owning this LV on this node,
            # '-' if the volume is not used by any instance
            val = '-'
            for inst in instances:
              if (node in lv_by_node[inst] and
                  vol['name'] in lv_by_node[inst][node]):
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
2229 dcb93971 Michael Hanselmann
2230 dcb93971 Michael Hanselmann
2231 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; raises if the host is not resolvable
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    # secondary_ip defaults to the primary (single-homed node)
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    # membership check: a fresh add must not exist, a re-add must exist
    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    # ensure the new node's addresses do not clash with any existing node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # a re-added node must keep its previous IP configuration
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # decide whether the new node should be a master candidate, based on
    # the candidate pool size and the current candidate count (a re-added
    # node is excluded from the current count since it's already there)
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []
    mc_now, mc_max = self.cfg.GetMasterCandidateStats(exceptions)
    # the new node will increase mc_max with one, so:
    mc_max = min(mc_max + 1, cp_size)
    self.master_candidate = mc_now < mc_max

    if self.op.readd:
      self.new_node = self.cfg.GetNodeInfo(node)
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
    else:
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot transfer ssh keys to the"
                               " new node: %s" % msg)

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # run a minimal node verify (ssh/hostname check) from the master
    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        for failed in result[verifier].data['nodelist']:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    # the VNC password file only needs copying if a hypervisor using it
    # is enabled on the cluster
    to_copy = []
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    if constants.HTS_COPY_VNC_PASSWORD.intersection(enabled_hypervisors):
      to_copy.append(constants.VNC_PASSWORD_FILE)

    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      # check the result's payload, not the (always-true) result object
      # itself; this matches the other call_upload_file check above
      if result[node].failed or not result[node].data:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      self.context.AddNode(new_node)
2477 a8083063 Iustin Pop
2478 a8083063 Iustin Pop
2479 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the syntactic validity of the opcode arguments.

    This expands the node name and verifies that exactly one sensible
    combination of the three boolean flags (master_candidate, offline,
    drained) was requested.

    """
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    # normalize the three optional flags to True/False/None
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    # at least one flag must be given ...
    if all_mods.count(None) == 3:
      raise errors.OpPrereqError("Please pass at least one modification")
    # ... and the offline/mc/drained states are mutually exclusive
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time")

  def ExpandNames(self):
    # only the target node needs to be locked
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via masterfailover")

    # demoting a master candidate (directly, or implicitly via
    # offline/drained) must not shrink the candidate pool below the
    # configured size, unless forced
    if ((self.op.master_candidate == False or self.op.offline == True or
         self.op.drained == True) and node.master_candidate):
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    # an offline or drained node cannot be promoted to master candidate
    # unless that state is being cleared in the same operation
    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name)

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    Returns a list of (parameter, new value) pairs describing every
    change that was applied, including implied ones (auto-demotions,
    cleared flags).

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        # going offline implies losing master-candidate and drained status
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        # tell the node to remove its master-candidate-only files;
        # failure here is only warned about, not fatal
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        # draining implies losing master-candidate and offline status
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
          rrc = self.rpc.call_node_demote_from_mc(node.name)
          msg = rrc.RemoteFailMsg()
          if msg:
            self.LogWarning("Node failed to demote itself: %s" % msg)
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result
2612 b31c8676 Iustin Pop
2613 b31c8676 Iustin Pop
2614 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # purely informational LU: no locks needed
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()

    # static, software-level information
    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      }

    # per-cluster configuration data; hvparams are restricted to the
    # enabled hypervisors only
    hvparams = dict([(hv_name, cluster.hvparams[hv_name])
                     for hv_name in cluster.enabled_hypervisors])
    result.update({
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": hvparams,
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "default_bridge": cluster.default_bridge,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "file_storage_dir": cluster.file_storage_dir,
      "tags": list(cluster.GetTags()),
      })

    return result
2658 a8083063 Iustin Pop
2659 a8083063 Iustin Pop
2660 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return a list of configuration values, as requested by the caller.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    # this LU acquires no locks
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites to check.

    """
    pass

  def Exec(self, feedback_fn):
    """Look up each requested field and return the values in order.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        values.append(self.cfg.GetClusterName())
      elif field == "master_node":
        values.append(self.cfg.GetMasterNode())
      elif field == "drain_flag":
        # the job-queue drain state is signalled by a flag file on disk
        values.append(os.path.exists(constants.JOB_QUEUE_DRAIN_FILE))
      else:
        raise errors.ParameterError(field)
    return values
2698 a8083063 Iustin Pop
2699 a8083063 Iustin Pop
2700 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring an instance's disks online.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are recalculated once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Verify the instance exists and its primary node is online.

    Also defaults the optional ignore_size opcode attribute.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)
    # default ignore_size when the opcode does not carry the attribute
    if not hasattr(self.op, "ignore_size"):
      self.op.ignore_size = False

  def Exec(self, feedback_fn):
    """Assemble the instance's disks and return the mapping info.

    """
    disks_ok, disks_info = \
      _AssembleInstanceDisks(self, self.instance,
                             ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")
    return disks_info
2740 a8083063 Iustin Pop
2741 a8083063 Iustin Pop
2742 e3443b36 Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        # work on a copy so the configuration's disk object keeps its size
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        # secondary-node failures only matter unless explicitly ignored
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
    # NOTE(review): 'result' here is the last assemble call made in the
    # inner loop for this disk, and its payload is appended even when that
    # call failed -- confirm this is intended for the error case
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        result.payload))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2820 a8083063 Iustin Pop
2821 a8083063 Iustin Pop
2822 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Assemble an instance's disks, cleaning up after a failure.

  On assembly failure the disks are shut down again and an
  L{errors.OpExecError} is raised; when force is false a hint about
  retrying with '--force' is also emitted.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                       ignore_secondaries=force)
  if disks_ok:
    return
  # assembly failed: undo whatever was brought up before aborting
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.proc.LogWarning("", hint="If the message above refers to a"
                       " secondary node,"
                       " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2835 fe7b0351 Michael Hanselmann
2836 fe7b0351 Michael Hanselmann
2837 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Take an instance's disks offline.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are recalculated once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Verify that the instance is part of the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shut the disks down, refusing to do so if the instance runs.

    """
    _SafeShutdownInstanceDisks(self, self.instance)
2869 a8083063 Iustin Pop
2870 a8083063 Iustin Pop
2871 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown an instance's block devices, but only if it is not running.

  The primary node is queried for its list of running instances first;
  L{errors.OpExecError} is raised if the node cannot be contacted or
  the instance is still running there.

  """
  pnode = instance.primary_node
  running = lu.rpc.call_instance_list([pnode], [instance.hypervisor])
  running = running[pnode]
  if running.failed or not isinstance(running.data, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             pnode)

  if instance.name in running.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2890 a8083063 Iustin Pop
2891 a8083063 Iustin Pop
2892 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  Errors on secondary nodes always make the function return False.
  Errors on the primary node also count, unless ignore_primary is
  true, in which case they only produce a warning.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we shut down
  @type ignore_primary: boolean
  @param ignore_primary: if true, shutdown failures on the primary
      node do not affect the return value
  @return: True if all shutdowns succeeded (subject to ignore_primary),
      False otherwise

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.RemoteFailMsg()
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        # a primary-node failure is only forgiven when ignore_primary is set
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result
2913 a8083063 Iustin Pop
2914 a8083063 Iustin Pop
2915 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
2916 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2917 d4f16fd9 Iustin Pop

2918 d4f16fd9 Iustin Pop
  This function check if a given node has the needed amount of free
2919 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2920 d4f16fd9 Iustin Pop
  information from the node, this function raise an OpPrereqError
2921 d4f16fd9 Iustin Pop
  exception.
2922 d4f16fd9 Iustin Pop

2923 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
2924 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2925 e69d05fd Iustin Pop
  @type node: C{str}
2926 e69d05fd Iustin Pop
  @param node: the node to check
2927 e69d05fd Iustin Pop
  @type reason: C{str}
2928 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2929 e69d05fd Iustin Pop
  @type requested: C{int}
2930 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2931 9ca87a96 Iustin Pop
  @type hypervisor_name: C{str}
2932 9ca87a96 Iustin Pop
  @param hypervisor_name: the hypervisor to ask for memory stats
2933 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2934 e69d05fd Iustin Pop
      we cannot check the node
2935 d4f16fd9 Iustin Pop

2936 d4f16fd9 Iustin Pop
  """
2937 9ca87a96 Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
2938 781de953 Iustin Pop
  nodeinfo[node].Raise()
2939 781de953 Iustin Pop
  free_mem = nodeinfo[node].data.get('memory_free')
2940 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2941 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2942 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2943 d4f16fd9 Iustin Pop
  if requested > free_mem:
2944 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2945 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2946 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2947 d4f16fd9 Iustin Pop
2948 d4f16fd9 Iustin Pop
2949 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, validates any
    extra beparams/hvparams carried by the opcode, verifies that the
    primary node is online and its bridges exist, and - when the
    instance is not already running there - that the primary node has
    enough free memory for it.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra beparams
    self.beparams = getattr(self.op, "beparams", {})
    if self.beparams:
      if not isinstance(self.beparams, dict):
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
                                   " dict" % (type(self.beparams), ))
      # fill the beparams dict
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
      self.op.beparams = self.beparams

    # extra hvparams
    self.hvparams = getattr(self.op, "hvparams", {})
    if self.hvparams:
      if not isinstance(self.hvparams, dict):
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
                                   " dict" % (type(self.hvparams), ))

      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
      # merge cluster defaults with instance values, then apply the
      # opcode overrides on top, and validate the combined result
      filled_hvp = cluster.FillDict(cluster.hvparams[instance.hypervisor],
                                    instance.hvparams)
      filled_hvp.update(self.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
      self.op.hvparams = self.hvparams

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if not remote_info.data:
      # instance not reported as running on its primary node: make sure
      # the node has enough free memory to start it
      _CheckNodeFreeMemory(self, instance.primary_node,
                           "starting instance %s" % instance.name,
                           bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    Marks the instance up in the configuration, assembles its disks
    and asks the primary node to start it; on failure the disks are
    shut down again.

    """
    instance = self.instance
    force = self.op.force

    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance,
                                          self.hvparams, self.beparams)
    msg = result.RemoteFailMsg()
    if msg:
      # do not leave the disks assembled if the start failed
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)
3046 a8083063 Iustin Pop
3047 a8083063 Iustin Pop
3048 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    # validate the reboot type before acquiring any locks
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its primary
    node is online and that its bridges exist.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft and hard reboots are delegated to the primary node in a single
    RPC call; a full reboot is implemented as an explicit shutdown
    (instance and disks) followed by a fresh disk assembly and start.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not reboot instance: %s" % msg)
    else:
      # full reboot: shut the instance and its disks down, then start over
      result = self.rpc.call_instance_shutdown(node_current, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance for"
                                 " full reboot: %s" % msg)
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        # starting failed: do not leave the disks assembled
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)
3132 bf6929a2 Alexander Schreiber
3133 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build the hooks environment.

    Hooks run on the master and on all nodes of the instance.

    """
    hook_env = _BuildInstanceHookEnvByObject(self, self.instance)
    node_list = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return hook_env, node_list, node_list

  def CheckPrereq(self):
    """Verify the instance exists and its primary node is online.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Stop the instance, then take its disks offline.

    """
    instance = self.instance
    # record the instance as down in the configuration before acting
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    err_msg = result.RemoteFailMsg()
    if err_msg:
      self.proc.LogWarning("Could not shutdown instance: %s" % err_msg)
    # shut the block devices down even if the instance shutdown failed
    _ShutdownInstanceDisks(self, instance)
3179 a8083063 Iustin Pop
3180 a8083063 Iustin Pop
3181 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the target instance."""
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    # both the config and the hypervisor must agree the instance is down
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    # os_type is optional on this opcode; None means "keep the current OS"
    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: this opcode has no "pnode" attribute (only instance_name and
        # os_type); referencing self.op.pnode here raised AttributeError
        # instead of the intended OpPrereqError
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    # the OS create scripts need the instance's disks activated
    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      # always deactivate the disks again, even when the install failed
      _ShutdownInstanceDisks(self, inst)
3267 fe7b0351 Michael Hanselmann
3268 fe7b0351 Michael Hanselmann
3269 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    # the instance must be down: both the config must say so and the
    # hypervisor on the primary node must not report it as running
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    # normalize the requested name to its resolved (fully-qualified) form
    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    # unless explicitly disabled, refuse a new name whose IP is in use
    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    # for file-based instances, remember the old storage directory (taken
    # from the first disk's logical id) before the config rename below
    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      # result.data is falsy when the node could not be contacted at all;
      # result.data[0] is the success flag of the rename itself
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    # run the OS rename script with the disks activated; a script failure
    # is only logged, since the config-level rename already took place
    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
3378 decd5f45 Iustin Pop
3379 decd5f45 Iustin Pop
3380 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the instance; its node locks are computed later."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the instance's node locks when the node level is reached."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    Unlike most instance LUs, removal hooks run on the master node only
    (the instance's own nodes are not part of the node list).

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      # with ignore_failures the removal is best-effort: warn and continue
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    # schedule the (now stale) instance lock for removal as well
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
3448 a8083063 Iustin Pop
3449 a8083063 Iustin Pop
3450 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields answerable from the configuration alone; regex entries cover
  # per-index disk/nic sub-fields such as "disk.size/0" or "nic.mac/1"
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  # fields that require contacting the nodes for live data
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")


  def ExpandNames(self):
    """Validate the fields and compute the needed locks.

    Locks (shared, on instances and their nodes) are only taken when at
    least one non-static field was requested and the opcode asks for
    locking; otherwise the query runs lock-free from the config.

    """
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire node locks only when this query decided to lock at all."""
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list (one entry per instance) of lists (one entry per
    requested output field), in the order the fields were requested.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    # classify nodes so per-instance fields can report errors precisely
    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
            # else no instance is alive
    else:
      # without node queries, pretend every instance has empty live data
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      # hypervisor/backend params with cluster-level defaults filled in
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combine node reachability, live state and the config's
          # admin state into a single symbolic status string
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "vcpus":
          val = i_be[constants.BE_VCPUS]
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          if instance.nics:
            val = instance.nics[0].ip
          else:
            val = None
        elif field == "bridge":
          if instance.nics:
            val = instance.nics[0].bridge
          else:
            val = None
        elif field == "mac":
          if instance.nics:
            val = instance.nics[0].mac
          else:
            val = None
        elif field == "sda_size" or field == "sdb_size":
          # legacy fields: map 'a'/'b' to disk index 0/1
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, ("Declared but unhandled variable parameter '%s'" %
                           field)
        else:
          assert False, "Declared but unhandled parameter '%s'" % field
        iout.append(val)
      output.append(iout)

    return output
3698 a8083063 Iustin Pop
3699 a8083063 Iustin Pop
3700 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  The instance is shut down on its primary node and (only if it was
  marked as up) restarted on its first secondary node.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present for this LU
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are filled in later by _LockInstancesNodes (see
    # DeclareLocks), once the instance lock is held and its node list
    # is known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    # the instance is already locked at this point (ExpandNames), so a
    # failed lookup is a programming error, not a user error
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # failover needs the disk data to already exist on the target node,
    # hence only network-mirrored templates are supported
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    # failover always goes to the first secondary node
    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)

    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # a degraded disk aborts the failover only for instances marked
        # as up and only when --ignore-consistency was not given
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      if self.op.ignore_consistency:
        # best-effort mode: warn and continue even though the shutdown
        # could not be confirmed
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        # roll back the (partial) disk activation before aborting
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
3838 a8083063 Iustin Pop
3839 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
3840 53c776b5 Iustin Pop
  """Migrate an instance.
3841 53c776b5 Iustin Pop

3842 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
3843 53c776b5 Iustin Pop
  which is done with shutdown.
3844 53c776b5 Iustin Pop

3845 53c776b5 Iustin Pop
  """
3846 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
3847 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3848 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
3849 53c776b5 Iustin Pop
3850 53c776b5 Iustin Pop
  REQ_BGL = False
3851 53c776b5 Iustin Pop
3852 53c776b5 Iustin Pop
  def ExpandNames(self):
3853 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
3854 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
3855 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3856 53c776b5 Iustin Pop
3857 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
3858 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
3859 53c776b5 Iustin Pop
      self._LockInstancesNodes()
3860 53c776b5 Iustin Pop
3861 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
3862 53c776b5 Iustin Pop
    """Build hooks env.
3863 53c776b5 Iustin Pop

3864 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3865 53c776b5 Iustin Pop

3866 53c776b5 Iustin Pop
    """
3867 53c776b5 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3868 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
3869 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
3870 53c776b5 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3871 53c776b5 Iustin Pop
    return env, nl, nl
3872 53c776b5 Iustin Pop
3873 53c776b5 Iustin Pop
  def CheckPrereq(self):
3874 53c776b5 Iustin Pop
    """Check prerequisites.
3875 53c776b5 Iustin Pop

3876 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
3877 53c776b5 Iustin Pop

3878 53c776b5 Iustin Pop
    """
3879 53c776b5 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3880 53c776b5 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3881 53c776b5 Iustin Pop
    if instance is None:
3882 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3883 53c776b5 Iustin Pop
                                 self.op.instance_name)
3884 53c776b5 Iustin Pop
3885 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
3886 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3887 53c776b5 Iustin Pop
                                 " drbd8, cannot migrate.")
3888 53c776b5 Iustin Pop
3889 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
3890 53c776b5 Iustin Pop
    if not secondary_nodes:
3891 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
3892 733a2b6a Iustin Pop
                                      " drbd8 disk template")
3893 53c776b5 Iustin Pop
3894 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
3895 53c776b5 Iustin Pop
3896 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
3897 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
3898 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
3899 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
3900 53c776b5 Iustin Pop
                         instance.hypervisor)
3901 53c776b5 Iustin Pop
3902 5bbd3f7f Michael Hanselmann
    # check bridge existence
3903 53c776b5 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
3904 53c776b5 Iustin Pop
    result = self.rpc.call_bridges_exist(target_node, brlist)
3905 53c776b5 Iustin Pop
    if result.failed or not result.data:
3906 53c776b5 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
3907 53c776b5 Iustin Pop
                                 " exist on destination node '%s'" %
3908 53c776b5 Iustin Pop
                                 (brlist, target_node))
3909 53c776b5 Iustin Pop
3910 53c776b5 Iustin Pop
    if not self.op.cleanup:
3911 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
3912 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
3913 53c776b5 Iustin Pop
                                                 instance)
3914 53c776b5 Iustin Pop
      msg = result.RemoteFailMsg()
3915 53c776b5 Iustin Pop
      if msg:
3916 53c776b5 Iustin Pop
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
3917 53c776b5 Iustin Pop
                                   msg)
3918 53c776b5 Iustin Pop
3919 53c776b5 Iustin Pop
    self.instance = instance
3920 53c776b5 Iustin Pop
3921 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
3922 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
3923 53c776b5 Iustin Pop

3924 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
3925 53c776b5 Iustin Pop

3926 53c776b5 Iustin Pop
    """
3927 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
3928 53c776b5 Iustin Pop
    all_done = False
3929 53c776b5 Iustin Pop
    while not all_done:
3930 53c776b5 Iustin Pop
      all_done = True
3931 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
3932 53c776b5 Iustin Pop
                                            self.nodes_ip,
3933 53c776b5 Iustin Pop
                                            self.instance.disks)
3934 53c776b5 Iustin Pop
      min_percent = 100
3935 53c776b5 Iustin Pop
      for node, nres in result.items():
3936 53c776b5 Iustin Pop
        msg = nres.RemoteFailMsg()
3937 53c776b5 Iustin Pop
        if msg:
3938 53c776b5 Iustin Pop
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
3939 53c776b5 Iustin Pop
                                   (node, msg))
3940 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
3941 53c776b5 Iustin Pop
        all_done = all_done and node_done
3942 53c776b5 Iustin Pop
        if node_percent is not None:
3943 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
3944 53c776b5 Iustin Pop
      if not all_done:
3945 53c776b5 Iustin Pop
        if min_percent < 100:
3946 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
3947 53c776b5 Iustin Pop
        time.sleep(2)
3948 53c776b5 Iustin Pop
3949 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
3950 53c776b5 Iustin Pop
    """Demote a node to secondary.
3951 53c776b5 Iustin Pop

3952 53c776b5 Iustin Pop
    """
3953 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
3954 53c776b5 Iustin Pop
3955 53c776b5 Iustin Pop
    for dev in self.instance.disks:
3956 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
3957 53c776b5 Iustin Pop
3958 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
3959 53c776b5 Iustin Pop
                                          self.instance.disks)
3960 53c776b5 Iustin Pop
    msg = result.RemoteFailMsg()
3961 53c776b5 Iustin Pop
    if msg:
3962 53c776b5 Iustin Pop
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
3963 53c776b5 Iustin Pop
                               " error %s" % (node, msg))
3964 53c776b5 Iustin Pop
3965 53c776b5 Iustin Pop
  def _GoStandalone(self):
3966 53c776b5 Iustin Pop
    """Disconnect from the network.
3967 53c776b5 Iustin Pop

3968 53c776b5 Iustin Pop
    """
3969 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
3970 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
3971 53c776b5 Iustin Pop
                                               self.instance.disks)
3972 53c776b5 Iustin Pop
    for node, nres in result.items():
3973 53c776b5 Iustin Pop
      msg = nres.RemoteFailMsg()
3974 53c776b5 Iustin Pop
      if msg:
3975 53c776b5 Iustin Pop
        raise errors.OpExecError("Cannot disconnect disks node %s,"
3976 53c776b5 Iustin Pop
                                 " error %s" % (node, msg))
3977 53c776b5 Iustin Pop
3978 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
3979 53c776b5 Iustin Pop
    """Reconnect to the network.
3980 53c776b5 Iustin Pop

3981 53c776b5 Iustin Pop
    """
3982 53c776b5 Iustin Pop
    if multimaster:
3983 53c776b5 Iustin Pop
      msg = "dual-master"
3984 53c776b5 Iustin Pop
    else:
3985 53c776b5 Iustin Pop
      msg = "single-master"
3986 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
3987 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
3988 53c776b5 Iustin Pop
                                           self.instance.disks,
3989 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
3990 53c776b5 Iustin Pop
    for node, nres in result.items():
3991 53c776b5 Iustin Pop
      msg = nres.RemoteFailMsg()
3992 53c776b5 Iustin Pop
      if msg:
3993 53c776b5 Iustin Pop
        raise errors.OpExecError("Cannot change disks config on node %s,"
3994 53c776b5 Iustin Pop
                                 " error: %s" % (node, msg))
3995 53c776b5 Iustin Pop
3996 53c776b5 Iustin Pop
  def _ExecCleanup(self):
3997 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
3998 53c776b5 Iustin Pop

3999 53c776b5 Iustin Pop
    The cleanup is done by:
4000 53c776b5 Iustin Pop
      - check that the instance is running only on one node
4001 53c776b5 Iustin Pop
        (and update the config if needed)
4002 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
4003 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
4004 53c776b5 Iustin Pop
      - disconnect from the network
4005 53c776b5 Iustin Pop
      - change disks into single-master mode
4006 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
4007 53c776b5 Iustin Pop

4008 53c776b5 Iustin Pop
    """
4009 53c776b5 Iustin Pop
    instance = self.instance
4010 53c776b5 Iustin Pop
    target_node = self.target_node
4011 53c776b5 Iustin Pop
    source_node = self.source_node
4012 53c776b5 Iustin Pop
4013 53c776b5 Iustin Pop
    # check running on only one node
4014 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
4015 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
4016 53c776b5 Iustin Pop
                     " a bad state)")
4017 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
4018 53c776b5 Iustin Pop
    for node, result in ins_l.items():
4019 53c776b5 Iustin Pop
      result.Raise()
4020 53c776b5 Iustin Pop
      if not isinstance(result.data, list):
4021 53c776b5 Iustin Pop
        raise errors.OpExecError("Can't contact node '%s'" % node)
4022 53c776b5 Iustin Pop
4023 53c776b5 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].data
4024 53c776b5 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].data
4025 53c776b5 Iustin Pop
4026 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
4027 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
4028 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
4029 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
4030 53c776b5 Iustin Pop
                               " and restart this operation.")
4031 53c776b5 Iustin Pop
4032 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
4033 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
4034 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
4035 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
4036 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
4037 53c776b5 Iustin Pop
4038 53c776b5 Iustin Pop
    if runningon_target:
4039 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
4040 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
4041 53c776b5 Iustin Pop
                       " updating config" % target_node)
4042 53c776b5 Iustin Pop
      instance.primary_node = target_node
4043 53c776b5 Iustin Pop
      self.cfg.Update(instance)
4044 53c776b5 Iustin Pop
      demoted_node = source_node
4045 53c776b5 Iustin Pop
    else:
4046 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
4047 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
4048 53c776b5 Iustin Pop
      demoted_node = target_node
4049 53c776b5 Iustin Pop
4050 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
4051 53c776b5 Iustin Pop
    try:
4052 53c776b5 Iustin Pop
      self._WaitUntilSync()
4053 53c776b5 Iustin Pop
    except errors.OpExecError:
4054 53c776b5 Iustin Pop
      # we ignore here errors, since if the device is standalone, it
4055 53c776b5 Iustin Pop
      # won't be able to sync
4056 53c776b5 Iustin Pop
      pass
4057 53c776b5 Iustin Pop
    self._GoStandalone()
4058 53c776b5 Iustin Pop
    self._GoReconnect(False)
4059 53c776b5 Iustin Pop
    self._WaitUntilSync()
4060 53c776b5 Iustin Pop
4061 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4062 53c776b5 Iustin Pop
4063 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
4064 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
4065 6906a9d8 Guido Trotter

4066 6906a9d8 Guido Trotter
    """
4067 6906a9d8 Guido Trotter
    target_node = self.target_node
4068 6906a9d8 Guido Trotter
    try:
4069 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
4070 6906a9d8 Guido Trotter
      self._GoStandalone()
4071 6906a9d8 Guido Trotter
      self._GoReconnect(False)
4072 6906a9d8 Guido Trotter
      self._WaitUntilSync()
4073 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
4074 6906a9d8 Guido Trotter
      self.LogWarning("Migration failed and I can't reconnect the"
4075 6906a9d8 Guido Trotter
                      " drives: error '%s'\n"
4076 6906a9d8 Guido Trotter
                      "Please look and recover the instance status" %
4077 6906a9d8 Guido Trotter
                      str(err))
4078 6906a9d8 Guido Trotter
4079 6906a9d8 Guido Trotter
  def _AbortMigration(self):
4080 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
4081 6906a9d8 Guido Trotter

4082 6906a9d8 Guido Trotter
    """
4083 6906a9d8 Guido Trotter
    instance = self.instance
4084 6906a9d8 Guido Trotter
    target_node = self.target_node
4085 6906a9d8 Guido Trotter
    migration_info = self.migration_info
4086 6906a9d8 Guido Trotter
4087 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
4088 6906a9d8 Guido Trotter
                                                    instance,
4089 6906a9d8 Guido Trotter
                                                    migration_info,
4090 6906a9d8 Guido Trotter
                                                    False)
4091 6906a9d8 Guido Trotter
    abort_msg = abort_result.RemoteFailMsg()
4092 6906a9d8 Guido Trotter
    if abort_msg:
4093 6906a9d8 Guido Trotter
      logging.error("Aborting migration failed on target node %s: %s" %
4094 6906a9d8 Guido Trotter
                    (target_node, abort_msg))
4095 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we stil have to try to revert the
4096 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
4097 6906a9d8 Guido Trotter
4098 53c776b5 Iustin Pop
  def _ExecMigration(self):
4099 53c776b5 Iustin Pop
    """Migrate an instance.
4100 53c776b5 Iustin Pop

4101 53c776b5 Iustin Pop
    The migrate is done by:
4102 53c776b5 Iustin Pop
      - change the disks into dual-master mode
4103 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
4104 53c776b5 Iustin Pop
      - migrate the instance
4105 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
4106 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
4107 53c776b5 Iustin Pop
      - change disks into single-master mode
4108 53c776b5 Iustin Pop

4109 53c776b5 Iustin Pop
    """
4110 53c776b5 Iustin Pop
    instance = self.instance
4111 53c776b5 Iustin Pop
    target_node = self.target_node
4112 53c776b5 Iustin Pop
    source_node = self.source_node
4113 53c776b5 Iustin Pop
4114 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
4115 53c776b5 Iustin Pop
    for dev in instance.disks:
4116 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
4117 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
4118 53c776b5 Iustin Pop
                                 " synchronized on target node,"
4119 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
4120 53c776b5 Iustin Pop
4121 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
4122 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
4123 6906a9d8 Guido Trotter
    msg = result.RemoteFailMsg()
4124 6906a9d8 Guido Trotter
    if msg:
4125 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
4126 0959c824 Iustin Pop
                 (source_node, msg))
4127 6906a9d8 Guido Trotter
      logging.error(log_err)
4128 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
4129 6906a9d8 Guido Trotter
4130 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
4131 6906a9d8 Guido Trotter
4132 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
4133 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
4134 53c776b5 Iustin Pop
    self._GoStandalone()
4135 53c776b5 Iustin Pop
    self._GoReconnect(True)
4136 53c776b5 Iustin Pop
    self._WaitUntilSync()
4137 53c776b5 Iustin Pop
4138 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
4139 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
4140 6906a9d8 Guido Trotter
                                           instance,
4141 6906a9d8 Guido Trotter
                                           migration_info,
4142 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
4143 6906a9d8 Guido Trotter
4144 6906a9d8 Guido Trotter
    msg = result.RemoteFailMsg()
4145 6906a9d8 Guido Trotter
    if msg:
4146 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
4147 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
4148 6906a9d8 Guido Trotter
      self._AbortMigration()
4149 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
4150 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
4151 6906a9d8 Guido Trotter
                               (instance.name, msg))
4152 6906a9d8 Guido Trotter
4153 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
4154 53c776b5 Iustin Pop
    time.sleep(10)
4155 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
4156 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
4157 53c776b5 Iustin Pop
                                            self.op.live)
4158 53c776b5 Iustin Pop
    msg = result.RemoteFailMsg()
4159 53c776b5 Iustin Pop
    if msg:
4160 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
4161 53c776b5 Iustin Pop
                    " disk status: %s", msg)
4162 6906a9d8 Guido Trotter
      self._AbortMigration()
4163 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
4164 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
4165 53c776b5 Iustin Pop
                               (instance.name, msg))
4166 53c776b5 Iustin Pop
    time.sleep(10)
4167 53c776b5 Iustin Pop
4168 53c776b5 Iustin Pop
    instance.primary_node = target_node
4169 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
4170 53c776b5 Iustin Pop
    self.cfg.Update(instance)
4171 53c776b5 Iustin Pop
4172 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
4173 6906a9d8 Guido Trotter
                                              instance,
4174 6906a9d8 Guido Trotter
                                              migration_info,
4175 6906a9d8 Guido Trotter
                                              True)
4176 6906a9d8 Guido Trotter
    msg = result.RemoteFailMsg()
4177 6906a9d8 Guido Trotter
    if msg:
4178 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
4179 6906a9d8 Guido Trotter
                    " %s" % msg)
4180 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
4181 6906a9d8 Guido Trotter
                               msg)
4182 6906a9d8 Guido Trotter
4183 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
4184 53c776b5 Iustin Pop
    self._WaitUntilSync()
4185 53c776b5 Iustin Pop
    self._GoStandalone()
4186 53c776b5 Iustin Pop
    self._GoReconnect(False)
4187 53c776b5 Iustin Pop
    self._WaitUntilSync()
4188 53c776b5 Iustin Pop
4189 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4190 53c776b5 Iustin Pop
4191 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
4192 53c776b5 Iustin Pop
    """Perform the migration.
4193 53c776b5 Iustin Pop

4194 53c776b5 Iustin Pop
    """
4195 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
4196 53c776b5 Iustin Pop
4197 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
4198 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
4199 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
4200 53c776b5 Iustin Pop
    self.nodes_ip = {
4201 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
4202 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
4203 53c776b5 Iustin Pop
      }
4204 53c776b5 Iustin Pop
    if self.op.cleanup:
4205 53c776b5 Iustin Pop
      return self._ExecCleanup()
4206 53c776b5 Iustin Pop
    else:
4207 53c776b5 Iustin Pop
      return self._ExecMigration()
4208 53c776b5 Iustin Pop
4209 53c776b5 Iustin Pop
4210 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
4211 428958aa Iustin Pop
                    info, force_open):
4212 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
4213 a8083063 Iustin Pop

4214 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
4215 a8083063 Iustin Pop
  all its children.
4216 a8083063 Iustin Pop

4217 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
4218 a8083063 Iustin Pop

4219 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
4220 428958aa Iustin Pop
  @param node: the node on which to create the device
4221 428958aa Iustin Pop
  @type instance: L{objects.Instance}
4222 428958aa Iustin Pop
  @param instance: the instance which owns the device
4223 428958aa Iustin Pop
  @type device: L{objects.Disk}
4224 428958aa Iustin Pop
  @param device: the device to create
4225 428958aa Iustin Pop
  @type force_create: boolean
4226 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
4227 428958aa Iustin Pop
      will be change to True whenever we find a device which has
4228 428958aa Iustin Pop
      CreateOnSecondary() attribute
4229 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4230 428958aa Iustin Pop
      (this will be represented as a LVM tag)
4231 428958aa Iustin Pop
  @type force_open: boolean
4232 428958aa Iustin Pop
  @param force_open: this parameter will be passes to the
4233 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4234 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
4235 428958aa Iustin Pop
      the child assembly and the device own Open() execution
4236 428958aa Iustin Pop

4237 a8083063 Iustin Pop
  """
4238 a8083063 Iustin Pop
  if device.CreateOnSecondary():
4239 428958aa Iustin Pop
    force_create = True
4240 796cab27 Iustin Pop
4241 a8083063 Iustin Pop
  if device.children:
4242 a8083063 Iustin Pop
    for child in device.children:
4243 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
4244 428958aa Iustin Pop
                      info, force_open)
4245 a8083063 Iustin Pop
4246 428958aa Iustin Pop
  if not force_create:
4247 796cab27 Iustin Pop
    return
4248 796cab27 Iustin Pop
4249 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
4250 de12473a Iustin Pop
4251 de12473a Iustin Pop
4252 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
4253 de12473a Iustin Pop
  """Create a single block device on a given node.
4254 de12473a Iustin Pop

4255 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
4256 de12473a Iustin Pop
  created in advance.
4257 de12473a Iustin Pop

4258 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
4259 de12473a Iustin Pop
  @param node: the node on which to create the device
4260 de12473a Iustin Pop
  @type instance: L{objects.Instance}
4261 de12473a Iustin Pop
  @param instance: the instance which owns the device
4262 de12473a Iustin Pop
  @type device: L{objects.Disk}
4263 de12473a Iustin Pop
  @param device: the device to create
4264 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4265 de12473a Iustin Pop
      (this will be represented as a LVM tag)
4266 de12473a Iustin Pop
  @type force_open: boolean
4267 de12473a Iustin Pop
  @param force_open: this parameter will be passes to the
4268 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4269 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
4270 de12473a Iustin Pop
      the child assembly and the device own Open() execution
4271 de12473a Iustin Pop

4272 de12473a Iustin Pop
  """
4273 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
4274 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
4275 428958aa Iustin Pop
                                       instance.name, force_open, info)
4276 7d81697f Iustin Pop
  msg = result.RemoteFailMsg()
4277 7d81697f Iustin Pop
  if msg:
4278 428958aa Iustin Pop
    raise errors.OpExecError("Can't create block device %s on"
4279 7d81697f Iustin Pop
                             " node %s for instance %s: %s" %
4280 7d81697f Iustin Pop
                             (device, node, instance.name, msg))
4281 a8083063 Iustin Pop
  if device.physical_id is None:
4282 0959c824 Iustin Pop
    device.physical_id = result.payload
4283 a8083063 Iustin Pop
4284 a8083063 Iustin Pop
4285 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
4286 923b1523 Iustin Pop
  """Generate a suitable LV name.
4287 923b1523 Iustin Pop

4288 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
4289 923b1523 Iustin Pop

4290 923b1523 Iustin Pop
  """
4291 923b1523 Iustin Pop
  results = []
4292 923b1523 Iustin Pop
  for val in exts:
4293 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
4294 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
4295 923b1523 Iustin Pop
  return results
4296 923b1523 Iustin Pop
4297 923b1523 Iustin Pop
4298 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  @param lu: the lu on whose behalf we execute
  @param primary: the primary node name
  @param secondary: the secondary node name
  @param size: the size of the data device
  @param names: two-element list with the data and meta LV names
  @param iv_name: the instance-visible name for the device
  @param p_minor: the DRBD minor on the primary node
  @param s_minor: the DRBD minor on the secondary node
  @return: the L{objects.Disk} describing the new DRBD8 device

  """
  # these cfg calls allocate cluster-wide resources; keep their order
  drbd_port = lu.cfg.AllocatePort()
  vg_name = lu.cfg.GetVGName()
  secret = lu.cfg.GenerateDRBDSecret()

  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vg_name, names[0]))
  # the metadata volume has a fixed size of 128
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vg_name, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, drbd_port,
                                  p_minor, s_minor, secret),
                      children=[data_lv, meta_lv],
                      iv_name=iv_name)
4317 a1f445d3 Iustin Pop
4318 7c0d6283 Michael Hanselmann
4319 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  @param lu: the lu on whose behalf we execute
  @param template_name: one of the constants.DT_* disk templates
  @param instance_name: the name of the instance owning the disks
  @param primary_node: the primary node of the instance
  @param secondary_nodes: the secondary nodes; must be empty for
      DT_PLAIN/DT_FILE and contain exactly one node for DT_DRBD8
  @param disk_info: list of dicts, each with "size" and "mode" keys
  @param file_storage_dir: directory holding DT_FILE-based disks
  @param file_driver: the file driver for DT_FILE-based disks
  @param base_index: offset added to the per-disk index when building
      the "disk/N" iv_name of each device
  @return: list of L{objects.Disk} objects (empty for DT_DISKLESS)
  @raise errors.ProgrammerError: if the secondary node count does not
      match the template, or the template is unknown

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # one (primary, secondary) minor pair is allocated per disk
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    # each disk gets a pair of LVs: <prefix>_data and <prefix>_meta
    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
4383 a8083063 Iustin Pop
4384 a8083063 Iustin Pop
4385 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
4386 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
4387 3ecf6786 Iustin Pop

4388 3ecf6786 Iustin Pop
  """
4389 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
4390 a0c3fea1 Michael Hanselmann
4391 a0c3fea1 Michael Hanselmann
4392 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @rtype: None
  @return: None; failures are signalled by raising an exception
      (the old "@rtype: boolean" claim was wrong: nothing is returned)
  @raise errors.OpExecError: if the file storage directory (for
      file-based instances) cannot be created, or a block device
      creation fails (raised by L{_CreateBlockDev})

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    if result.failed or not result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % pnode)

    if not result.data[0]:
      raise errors.OpExecError("Failed to create directory '%s'" %
                               file_storage_dir)

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in instance.all_nodes:
      # the same flag is used for both force_create and force_open:
      # only set on the primary node
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
4428 a8083063 Iustin Pop
4429 a8083063 Iustin Pop
4430 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  success = True
  for top_disk in instance.disks:
    for node_name, child in top_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(child, node_name)
      fail_msg = lu.rpc.call_blockdev_remove(node_name,
                                             child).RemoteFailMsg()
      if fail_msg:
        # best-effort: log and keep removing the remaining devices
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s",
                      top_disk.iv_name, node_name, fail_msg)
        success = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    dir_result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                     file_storage_dir)
    if dir_result.failed or not dir_result.data:
      logging.error("Could not remove directory '%s'", file_storage_dir)
      success = False

  return success
4467 a8083063 Iustin Pop
4468 a8083063 Iustin Pop
4469 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  @param disk_template: the disk template of the instance
  @param disks: list of disk definitions, each a dict with a "size" key
  @return: the required free space in the volume group, or None for
      templates which do not consume volume group space
  @raise errors.ProgrammerError: for an unknown disk template

  """
  _unknown = object()
  # 128 MB are added for drbd metadata for each disk
  req_size = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
  }.get(disk_template, _unknown)

  if req_size is _unknown:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size
4487 e2fe6369 Iustin Pop
4488 e2fe6369 Iustin Pop
4489 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
4490 74409b12 Iustin Pop
  """Hypervisor parameter validation.
4491 74409b12 Iustin Pop

4492 74409b12 Iustin Pop
  This function abstract the hypervisor parameter validation to be
4493 74409b12 Iustin Pop
  used in both instance create and instance modify.
4494 74409b12 Iustin Pop

4495 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
4496 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
4497 74409b12 Iustin Pop
  @type nodenames: list
4498 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
4499 74409b12 Iustin Pop
  @type hvname: string
4500 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
4501 74409b12 Iustin Pop
  @type hvparams: dict
4502 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
4503 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
4504 74409b12 Iustin Pop

4505 74409b12 Iustin Pop
  """
4506 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
4507 74409b12 Iustin Pop
                                                  hvname,
4508 74409b12 Iustin Pop
                                                  hvparams)
4509 74409b12 Iustin Pop
  for node in nodenames:
4510 781de953 Iustin Pop
    info = hvinfo[node]
4511 68c6f21c Iustin Pop
    if info.offline:
4512 68c6f21c Iustin Pop
      continue
4513 0959c824 Iustin Pop
    msg = info.RemoteFailMsg()
4514 0959c824 Iustin Pop
    if msg:
4515 d64769a8 Iustin Pop
      raise errors.OpPrereqError("Hypervisor parameter validation"
4516 d64769a8 Iustin Pop
                                 " failed on node %s: %s" % (node, msg))
4517 74409b12 Iustin Pop
4518 74409b12 Iustin Pop
4519 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"  # hooks path name for this LU
  HTYPE = constants.HTYPE_INSTANCE  # hooks target type
  # opcode attributes that must be present; presumably validated by the
  # LogicalUnit base class -- TODO confirm
  _OP_REQP = ["instance_name", "disks", "disk_template",
              "mode", "start",
              "wait_for_sync", "ip_check", "nics",
              "hvparams", "beparams"]
  REQ_BGL = False  # this LU declares its own locks via ExpandNames below
4530 7baf741d Guido Trotter
4531 7baf741d Guido Trotter
  def _ExpandNode(self, node):
4532 7baf741d Guido Trotter
    """Expands and checks one node name.
4533 7baf741d Guido Trotter

4534 7baf741d Guido Trotter
    """
4535 7baf741d Guido Trotter
    node_full = self.cfg.ExpandNodeName(node)
4536 7baf741d Guido Trotter
    if node_full is None:
4537 7baf741d Guido Trotter
      raise errors.OpPrereqError("Unknown node %s" % node)
4538 7baf741d Guido Trotter
    return node_full
4539 7baf741d Guido Trotter
4540 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    Validates the opcode parameters that can be checked without
    cluster-wide state (creation mode, disk template, hypervisor and
    backend parameters, NIC and disk specifications), and computes the
    node locks needed (either an explicit node list or ALL_SET when an
    iallocator will choose the nodes).

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    # remembered for BuildHooksEnv / CheckPrereq
    self.hv_full = filled_hvp

    # fill and remember the beparams dict
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        # "auto" resolves to the instance's own primary IP
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
        else:
          # or validate/reserve the current one
          if self.cfg.IsMacInUse(mac):
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % mac)

      # bridge verification
      bridge = nic.get("bridge", None)
      if bridge is None:
        bridge = self.cfg.GetDefBridge()
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    # NOTE(review): file_driver/file_storage_dir are read without the
    # hasattr guard used for pnode/snode above -- presumably the opcode
    # always defines them; confirm against the opcode definition
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        # without a source node we must search all nodes, hence ALL_SET
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          # relative import paths are taken relative to the export dir
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
4703 a8083063 Iustin Pop
4704 538475ca Iustin Pop
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    On success this fills in self.op.pnode (and self.op.snode when the
    allocator was asked for two nodes) from the allocator's answer.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    nic_dicts = [nic.ToDict() for nic in self.nics]
    allocator = IAllocator(self,
                           mode=constants.IALLOCATOR_MODE_ALLOC,
                           name=self.op.instance_name,
                           disk_template=self.op.disk_template,
                           tags=[],
                           os=self.op.os_type,
                           vcpus=self.be_full[constants.BE_VCPUS],
                           mem_size=self.be_full[constants.BE_MEMORY],
                           disks=self.disks,
                           nics=nic_dicts,
                           hypervisor=self.op.hypervisor,
                           )

    allocator.Run(self.op.iallocator)

    if not allocator.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, allocator.info))
    if len(allocator.nodes) != allocator.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(allocator.nodes),
                                  allocator.required_nodes))
    self.op.pnode = allocator.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(allocator.nodes))
    if allocator.required_nodes == 2:
      self.op.snode = allocator.nodes[1]
4739 538475ca Iustin Pop
4740 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"ADD_MODE": self.op.mode}
    if self.op.mode == constants.INSTANCE_IMPORT:
      # import mode exposes the export source to the hooks as well
      env.update({
        "SRC_NODE": self.op.src_node,
        "SRC_PATH": self.op.src_path,
        "SRC_IMAGES": self.src_images,
        })

    nic_list = [(nic.ip, nic.bridge, nic.mac) for nic in self.nics]
    disk_list = [(disk["size"], disk["mode"]) for disk in self.disks]
    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=nic_list,
      disk_template=self.op.disk_template,
      disks=disk_list,
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
    ))

    # hooks run on the master plus all nodes of the new instance
    node_list = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return env, node_list, node_list
4773 a8083063 Iustin Pop
4774 a8083063 Iustin Pop
4775 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Validates everything needed before creating the instance: LVM
    availability, the import source (in import mode), IP conflicts,
    primary/secondary node status, free disk space, hypervisor
    parameters, OS availability, bridges and free memory.  As side
    effects it fills in self.pnode and self.secondaries and, for
    imports, self.src_images and self.op.src_node/src_path/os_type.

    Raises errors.OpPrereqError when a prerequisite is not met.

    """
    # clusters without a volume group can only host non-lvm disk templates
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_node is None:
        # relative path without an explicit source node: search every
        # locked node for an export whose name matches the given path
        exp_list = self.rpc.call_export_list(
          self.acquired_locks[locking.LEVEL_NODE])
        found = False
        for node in exp_list:
          if not exp_list[node].failed and src_path in exp_list[node].data:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                      src_path)

      _CheckNodeOnline(self, src_node)
      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise()
      if not result.data:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      # export_info is a ConfigParser-style object (sections/options)
      export_info = result.data
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      # the OS type always comes from the export, overriding the opcode
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      # map each disk index to its dump image path in the export, or
      # False when the export carries no image for that index
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        # re-importing under the same name: reuse the exported MAC
        # addresses for NICs that were left on 'auto'
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # an answering host means the address is already taken
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    #### allocator run

    # the allocator fills in self.op.pnode (and self.op.snode when needed)
    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    # req_size is None for templates with no lv-based storage to check
    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise()
        info = info.data
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        # vg_free/req_size are in megabytes (see error message below)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise()
    if not isinstance(result.data, objects.OS) or not result.data:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
                                 " exist on destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)
4953 49ce1563 Iustin Pop
4954 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the disks, registers the instance in the configuration,
    releases the node locks that are no longer needed, waits for the
    disks to sync, runs the OS creation/import scripts and optionally
    starts the instance.  feedback_fn is called with progress messages
    for the submitting client.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    ht_kind = self.op.hypervisor
    # some hypervisor types need a cluster-allocated network port
    # (presumably for console access -- see HTS_REQ_PORT)
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    # the instance is created stopped (admin_up=False); it is flipped to
    # running further down only when self.op.start is set
    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    try:
      _CreateDisks(self, iobj)
    except errors.OpExecError:
      # roll back any partially-created disks, release the reserved DRBD
      # minors, then re-raise the original error
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, iobj)
      finally:
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Unlock all the nodes
    if self.op.mode == constants.INSTANCE_IMPORT:
      # keep the source node locked (still needed for the import below),
      # release everything else
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo the creation entirely
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
        msg = result.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Could not add os for instance %s"
                                   " on node %s: %s" %
                                   (instance, pnode_name, msg))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        import_result.Raise()
        # per-disk import failures are only warnings, not fatal errors
        for idx, result in enumerate(import_result.data):
          if not result:
            self.LogWarning("Could not import the image %s for instance"
                            " %s, disk %d, on node %s" %
                            (src_images[idx], instance, idx, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      # persist the new admin state before actually starting it
      iobj.admin_up = True
      self.cfg.Update(iobj)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not start instance: %s" % msg)
5089 a8083063 Iustin Pop
5090 a8083063 Iustin Pop
5091 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the target instance."""
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    pnode = instance.primary_node

    # ask the primary node which instances it is currently running
    running = self.rpc.call_instance_list([pnode],
                                          [instance.hypervisor])[pnode]
    running.Raise()
    if instance.name not in running.data:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logging.debug("Connecting to console of %s on %s", instance.name, pnode)

    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    cluster = self.cfg.GetClusterInfo()
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    hvparams = cluster.FillHV(instance)
    beparams = cluster.FillBE(instance)
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams,
                                                  beparams)

    # build ssh cmdline
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
5142 a8083063 Iustin Pop
5143 a8083063 Iustin Pop
5144 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  Supports replacing disks on the primary, on the secondary, or moving
  to a new secondary node (see the mode handling in CheckPrereq).

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters; "mode" selects which replace variant runs
  _OP_REQP = ["instance_name", "mode", "disks"]
  # REQ_BGL=False: this LU declares its own locks in ExpandNames/DeclareLocks
  REQ_BGL = False
5152 efd990e4 Guido Trotter
5153 7e9366f7 Iustin Pop
  def CheckArguments(self):
    """Normalize optional opcode fields and validate their combination.

    Exactly one of remote_node/iallocator must be given when changing
    the secondary; neither may be given for the other modes.

    """
    # default the optional fields to None if the opcode omitted them
    for attr in ("remote_node", "iallocator"):
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # check for valid parameter combination
    given = [opt for opt in (self.op.remote_node, self.op.iallocator)
             if opt is not None]
    if self.op.mode == constants.REPLACE_DISK_CHG:
      if not given:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given")
      if len(given) == 2:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
    else: # not replacing the secondary
      if given:
        raise errors.OpPrereqError("The iallocator and new node options can"
                                   " be used only when changing the"
                                   " secondary node")
5174 7e9366f7 Iustin Pop
5175 7e9366f7 Iustin Pop
  def ExpandNames(self):
    """Acquire the instance lock and declare the needed node locks.

    """
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      # the allocator may pick any node, so we must be able to lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      expanded = self.cfg.ExpandNodeName(self.op.remote_node)
      if expanded is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = expanded
      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [expanded]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      # lock set computed later from the instance's own nodes
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5195 efd990e4 Guido Trotter
5196 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Declare the instance's nodes when the node lock level is reached.

    """
    if level != locking.LEVEL_NODE:
      return
    # If we're not already locking all nodes in the set we have to
    # declare the instance's primary/secondary nodes.
    if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
      self._LockInstancesNodes()
5202 a8083063 Iustin Pop
5203 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    On success the chosen node name is stored in self.op.remote_node.

    """
    allocator = IAllocator(self,
                           mode=constants.IALLOCATOR_MODE_RELOC,
                           name=self.op.instance_name,
                           relocate_from=[self.sec_node])
    allocator.Run(self.op.iallocator)

    if not allocator.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           allocator.info))
    if len(allocator.nodes) != allocator.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator,
                                  len(allocator.nodes),
                                  allocator.required_nodes))
    self.op.remote_node = allocator.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)
5226 b6e82a65 Iustin Pop
5227 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    # note: the instance-derived environment is applied last on purpose,
    # preserving the original override order
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    node_list = [self.cfg.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return env, node_list, node_list
5246 a8083063 Iustin Pop
5247 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    Side effects: sets self.instance, self.sec_node, self.remote_node_info,
    self.tgt_node, self.oth_node (and self.new_node for the secondary-change
    mode), may fill in self.op.remote_node via the iallocator, and defaults
    self.op.disks to all disk indices when empty.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # this LU only handles DRBD8-backed instances
    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # when an allocator is requested, let it choose the new secondary;
    # this fills in self.op.remote_node
    if self.op.iallocator is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.")

    # per mode, pick the node whose storage is replaced (tgt_node) and
    # the peer used for consistency checks (oth_node); both must be online
    if self.op.mode == constants.REPLACE_DISK_PRI:
      n1 = self.tgt_node = instance.primary_node
      n2 = self.oth_node = self.sec_node
    elif self.op.mode == constants.REPLACE_DISK_SEC:
      n1 = self.tgt_node = self.sec_node
      n2 = self.oth_node = instance.primary_node
    elif self.op.mode == constants.REPLACE_DISK_CHG:
      n1 = self.new_node = remote_node
      n2 = self.oth_node = instance.primary_node
      self.tgt_node = self.sec_node
      _CheckNodeNotDrained(self, remote_node)
    else:
      raise errors.ProgrammerError("Unhandled disk replace mode")

    _CheckNodeOnline(self, n1)
    _CheckNodeOnline(self, n2)

    # an empty disk list means "replace all disks"
    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    # validate each requested index (return value unused; FindDisk is
    # expected to raise for unknown indices -- TODO confirm)
    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)
5308 a8083063 Iustin Pop
5309 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    Note: feedback_fn is unused here; progress is reported through
    self.proc.LogStep and the info/warning helpers.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # iv_names maps the disk's iv_name to (drbd_dev, old_lvs, new_lvs)
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    # every selected disk must be visible on both nodes before we touch it
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking disk/%d on %s" % (idx, node))
        cfg.SetDiskID(dev, node)
        result = self.rpc.call_blockdev_find(node, dev)
        msg = result.RemoteFailMsg()
        if not msg and not result.payload:
          msg = "disk not found"
        if msg:
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, oth_node))
      # the peer must be healthy: its copy is the only good data while
      # the target node's storage is being swapped out
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      # one data LV and one (fixed 128M) meta LV per disk, with fresh names
      lv_names = [".disk%d_%s" % (idx, suf)
                  for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if not result.RemoteFailMsg() and result.payload:
          # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # keep the config objects in sync with the on-disk renames above
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
      if result.failed or not result.data:
        # best-effort rollback: try to delete the LVs we just created
        for new_lv in new_lvs:
          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
          if msg:
            warning("Can't rollback device %s: %s", dev, msg,
                    hint="cleanup manually the unused logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))
      # payload[5] is treated as the "degraded" flag of the found device
      # -- TODO confirm against the blockdev_find payload format
      if result.payload[5]:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # removal failures are only warned about, not fatal: the replace
        # itself has already succeeded at this point
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
        if msg:
          warning("Can't remove old LV: %s" % msg,
                  hint="manually remove unused LVs")
          continue
5500 a9e0c397 Iustin Pop
5501 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
5502 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
5503 a9e0c397 Iustin Pop

5504 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5505 a9e0c397 Iustin Pop
      - for all disks of the instance:
5506 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
5507 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
5508 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
5509 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
5510 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
5511 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
5512 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
5513 a9e0c397 Iustin Pop
          not network enabled
5514 a9e0c397 Iustin Pop
      - wait for sync across all devices
5515 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
5516 a9e0c397 Iustin Pop

5517 a9e0c397 Iustin Pop
    Failures are not very well handled.
5518 0834c866 Iustin Pop

5519 a9e0c397 Iustin Pop
    """
5520 0834c866 Iustin Pop
    steps_total = 6
5521 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5522 a9e0c397 Iustin Pop
    instance = self.instance
5523 a9e0c397 Iustin Pop
    iv_names = {}
5524 a9e0c397 Iustin Pop
    # start of work
5525 a9e0c397 Iustin Pop
    cfg = self.cfg
5526 a9e0c397 Iustin Pop
    old_node = self.tgt_node
5527 a9e0c397 Iustin Pop
    new_node = self.new_node
5528 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
5529 a2d59d8b Iustin Pop
    nodes_ip = {
5530 a2d59d8b Iustin Pop
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
5531 a2d59d8b Iustin Pop
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
5532 a2d59d8b Iustin Pop
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
5533 a2d59d8b Iustin Pop
      }
5534 0834c866 Iustin Pop
5535 0834c866 Iustin Pop
    # Step: check device activation
5536 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
5537 0834c866 Iustin Pop
    info("checking volume groups")
5538 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
5539 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
5540 0834c866 Iustin Pop
    for node in pri_node, new_node:
5541 781de953 Iustin Pop
      res = results[node]
5542 781de953 Iustin Pop
      if res.failed or not res.data or my_vg not in res.data:
5543 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5544 0834c866 Iustin Pop
                                 (my_vg, node))
5545 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5546 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5547 0834c866 Iustin Pop
        continue
5548 d418ebfb Iustin Pop
      info("checking disk/%d on %s" % (idx, pri_node))
5549 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5550 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5551 23829f6f Iustin Pop
      msg = result.RemoteFailMsg()
5552 23829f6f Iustin Pop
      if not msg and not result.payload:
5553 23829f6f Iustin Pop
        msg = "disk not found"
5554 23829f6f Iustin Pop
      if msg:
5555 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5556 23829f6f Iustin Pop
                                 (idx, pri_node, msg))
5557 0834c866 Iustin Pop
5558 0834c866 Iustin Pop
    # Step: check other node consistency
5559 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
5560 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5561 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5562 0834c866 Iustin Pop
        continue
5563 d418ebfb Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, pri_node))
5564 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
5565 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
5566 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
5567 0834c866 Iustin Pop
                                 pri_node)
5568 0834c866 Iustin Pop
5569 0834c866 Iustin Pop
    # Step: create new storage
5570 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
5571 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5572 d418ebfb Iustin Pop
      info("adding new local storage on %s for disk/%d" %
5573 d418ebfb Iustin Pop
           (new_node, idx))
5574 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
5575 a9e0c397 Iustin Pop
      for new_lv in dev.children:
5576 428958aa Iustin Pop
        _CreateBlockDev(self, new_node, instance, new_lv, True,
5577 428958aa Iustin Pop
                        _GetInstanceInfoText(instance), False)
5578 a9e0c397 Iustin Pop
5579 468b46f9 Iustin Pop
    # Step 4: dbrd minors and drbd setups changes
5580 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
5581 a1578d63 Iustin Pop
    # error and the success paths
5582 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
5583 a1578d63 Iustin Pop
                                   instance.name)
5584 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
5585 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
5586 d418ebfb Iustin Pop
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
5587 d418ebfb Iustin Pop
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
5588 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
5589 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
5590 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
5591 a2d59d8b Iustin Pop
      # with network, for the latter activation in step 4
5592 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5593 a2d59d8b Iustin Pop
      if pri_node == o_node1:
5594 a2d59d8b Iustin Pop
        p_minor = o_minor1
5595 ffa1c0dc Iustin Pop
      else:
5596 a2d59d8b Iustin Pop
        p_minor = o_minor2
5597 a2d59d8b Iustin Pop
5598 a2d59d8b Iustin Pop
      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
5599 a2d59d8b Iustin Pop
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
5600 a2d59d8b Iustin Pop
5601 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
5602 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5603 a2d59d8b Iustin Pop
                    new_net_id)
5604 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5605 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
5606 8a6c7011 Iustin Pop
                              children=dev.children,
5607 8a6c7011 Iustin Pop
                              size=dev.size)
5608 796cab27 Iustin Pop
      try:
5609 de12473a Iustin Pop
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
5610 de12473a Iustin Pop
                              _GetInstanceInfoText(instance), False)
5611 82759cb1 Iustin Pop
      except errors.GenericError:
5612 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5613 796cab27 Iustin Pop
        raise
5614 a9e0c397 Iustin Pop
5615 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5616 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
5617 d418ebfb Iustin Pop
      info("shutting down drbd for disk/%d on old node" % idx)
5618 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
5619 cacfd1fd Iustin Pop
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
5620 cacfd1fd Iustin Pop
      if msg:
5621 cacfd1fd Iustin Pop
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
5622 cacfd1fd Iustin Pop
                (idx, msg),
5623 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
5624 a9e0c397 Iustin Pop
5625 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
5626 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
5627 a2d59d8b Iustin Pop
                                               instance.disks)[pri_node]
5628 642445d9 Iustin Pop
5629 a2d59d8b Iustin Pop
    msg = result.RemoteFailMsg()
5630 a2d59d8b Iustin Pop
    if msg:
5631 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
5632 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
5633 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
5634 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
5635 642445d9 Iustin Pop
5636 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
5637 642445d9 Iustin Pop
    # the instance to point to the new secondary
5638 642445d9 Iustin Pop
    info("updating instance configuration")
5639 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
5640 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
5641 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5642 642445d9 Iustin Pop
    cfg.Update(instance)
5643 a9e0c397 Iustin Pop
5644 642445d9 Iustin Pop
    # and now perform the drbd attach
5645 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
5646 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
5647 a2d59d8b Iustin Pop
                                           instance.disks, instance.name,
5648 a2d59d8b Iustin Pop
                                           False)
5649 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
5650 a2d59d8b Iustin Pop
      msg = to_result.RemoteFailMsg()
5651 a2d59d8b Iustin Pop
      if msg:
5652 a2d59d8b Iustin Pop
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
5653 a2d59d8b Iustin Pop
                hint="please do a gnt-instance info to see the"
5654 a2d59d8b Iustin Pop
                " status of disks")
5655 a9e0c397 Iustin Pop
5656 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
5657 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
5658 a9e0c397 Iustin Pop
    # return value
5659 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
5660 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
5661 a9e0c397 Iustin Pop
5662 a9e0c397 Iustin Pop
    # so check manually all the devices
5663 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5664 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5665 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5666 23829f6f Iustin Pop
      msg = result.RemoteFailMsg()
5667 23829f6f Iustin Pop
      if not msg and not result.payload:
5668 23829f6f Iustin Pop
        msg = "disk not found"
5669 23829f6f Iustin Pop
      if msg:
5670 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
5671 23829f6f Iustin Pop
                                 (idx, msg))
5672 23829f6f Iustin Pop
      if result.payload[5]:
5673 d418ebfb Iustin Pop
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
5674 a9e0c397 Iustin Pop
5675 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
5676 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5677 d418ebfb Iustin Pop
      info("remove logical volumes for disk/%d" % idx)
5678 a9e0c397 Iustin Pop
      for lv in old_lvs:
5679 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
5680 e1bc0878 Iustin Pop
        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
5681 e1bc0878 Iustin Pop
        if msg:
5682 e1bc0878 Iustin Pop
          warning("Can't remove LV on old secondary: %s", msg,
5683 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
5684 a9e0c397 Iustin Pop
5685 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
5686 a9e0c397 Iustin Pop
    """Execute disk replacement.
5687 a9e0c397 Iustin Pop

5688 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
5689 a9e0c397 Iustin Pop

5690 a9e0c397 Iustin Pop
    """
5691 a9e0c397 Iustin Pop
    instance = self.instance
5692 22985314 Guido Trotter
5693 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
5694 0d68c45d Iustin Pop
    if not instance.admin_up:
5695 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, True)
5696 22985314 Guido Trotter
5697 7e9366f7 Iustin Pop
    if self.op.mode == constants.REPLACE_DISK_CHG:
5698 7e9366f7 Iustin Pop
      fn = self._ExecD8Secondary
5699 a9e0c397 Iustin Pop
    else:
5700 7e9366f7 Iustin Pop
      fn = self._ExecD8DiskOnly
5701 22985314 Guido Trotter
5702 22985314 Guido Trotter
    ret = fn(feedback_fn)
5703 22985314 Guido Trotter
5704 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
5705 0d68c45d Iustin Pop
    if not instance.admin_up:
5706 b9bddb6b Iustin Pop
      _SafeShutdownInstanceDisks(self, instance)
5707 22985314 Guido Trotter
5708 22985314 Guido Trotter
    return ret
5709 a9e0c397 Iustin Pop
5710 a8083063 Iustin Pop
5711 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
5712 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
5713 8729e0d7 Iustin Pop

5714 8729e0d7 Iustin Pop
  """
5715 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
5716 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5717 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
5718 31e63dbf Guido Trotter
  REQ_BGL = False
5719 31e63dbf Guido Trotter
5720 31e63dbf Guido Trotter
  def ExpandNames(self):
5721 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
5722 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
5723 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5724 31e63dbf Guido Trotter
5725 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
5726 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
5727 31e63dbf Guido Trotter
      self._LockInstancesNodes()
5728 8729e0d7 Iustin Pop
5729 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
5730 8729e0d7 Iustin Pop
    """Build hooks env.
5731 8729e0d7 Iustin Pop

5732 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
5733 8729e0d7 Iustin Pop

5734 8729e0d7 Iustin Pop
    """
5735 8729e0d7 Iustin Pop
    env = {
5736 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
5737 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
5738 8729e0d7 Iustin Pop
      }
5739 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5740 8729e0d7 Iustin Pop
    nl = [
5741 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
5742 8729e0d7 Iustin Pop
      self.instance.primary_node,
5743 8729e0d7 Iustin Pop
      ]
5744 8729e0d7 Iustin Pop
    return env, nl, nl
5745 8729e0d7 Iustin Pop
5746 8729e0d7 Iustin Pop
  def CheckPrereq(self):
5747 8729e0d7 Iustin Pop
    """Check prerequisites.
5748 8729e0d7 Iustin Pop

5749 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
5750 8729e0d7 Iustin Pop

5751 8729e0d7 Iustin Pop
    """
5752 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5753 31e63dbf Guido Trotter
    assert instance is not None, \
5754 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5755 6b12959c Iustin Pop
    nodenames = list(instance.all_nodes)
5756 6b12959c Iustin Pop
    for node in nodenames:
5757 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, node)
5758 7527a8a4 Iustin Pop
5759 31e63dbf Guido Trotter
5760 8729e0d7 Iustin Pop
    self.instance = instance
5761 8729e0d7 Iustin Pop
5762 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
5763 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
5764 8729e0d7 Iustin Pop
                                 " growing.")
5765 8729e0d7 Iustin Pop
5766 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
5767 8729e0d7 Iustin Pop
5768 72737a7f Iustin Pop
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
5769 72737a7f Iustin Pop
                                       instance.hypervisor)
5770 8729e0d7 Iustin Pop
    for node in nodenames:
5771 781de953 Iustin Pop
      info = nodeinfo[node]
5772 781de953 Iustin Pop
      if info.failed or not info.data:
5773 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
5774 8729e0d7 Iustin Pop
                                   " from node '%s'" % node)
5775 781de953 Iustin Pop
      vg_free = info.data.get('vg_free', None)
5776 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
5777 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
5778 8729e0d7 Iustin Pop
                                   " node %s" % node)
5779 781de953 Iustin Pop
      if self.op.amount > vg_free:
5780 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
5781 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
5782 781de953 Iustin Pop
                                   (node, vg_free, self.op.amount))
5783 8729e0d7 Iustin Pop
5784 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
5785 8729e0d7 Iustin Pop
    """Execute disk grow.
5786 8729e0d7 Iustin Pop

5787 8729e0d7 Iustin Pop
    """
5788 8729e0d7 Iustin Pop
    instance = self.instance
5789 ad24e046 Iustin Pop
    disk = self.disk
5790 6b12959c Iustin Pop
    for node in instance.all_nodes:
5791 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
5792 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
5793 0959c824 Iustin Pop
      msg = result.RemoteFailMsg()
5794 0959c824 Iustin Pop
      if msg:
5795 781de953 Iustin Pop
        raise errors.OpExecError("Grow request failed to node %s: %s" %
5796 0959c824 Iustin Pop
                                 (node, msg))
5797 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
5798 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
5799 6605411d Iustin Pop
    if self.op.wait_for_sync:
5800 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
5801 6605411d Iustin Pop
      if disk_abort:
5802 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
5803 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
5804 8729e0d7 Iustin Pop
5805 8729e0d7 Iustin Pop
5806 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
5807 a8083063 Iustin Pop
  """Query runtime instance data.
5808 a8083063 Iustin Pop

5809 a8083063 Iustin Pop
  """
5810 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
5811 a987fa48 Guido Trotter
  REQ_BGL = False
5812 ae5849b5 Michael Hanselmann
5813 a987fa48 Guido Trotter
  def ExpandNames(self):
5814 a987fa48 Guido Trotter
    self.needed_locks = {}
5815 a987fa48 Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
5816 a987fa48 Guido Trotter
5817 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
5818 a987fa48 Guido Trotter
      raise errors.OpPrereqError("Invalid argument type 'instances'")
5819 a987fa48 Guido Trotter
5820 a987fa48 Guido Trotter
    if self.op.instances:
5821 a987fa48 Guido Trotter
      self.wanted_names = []
5822 a987fa48 Guido Trotter
      for name in self.op.instances:
5823 a987fa48 Guido Trotter
        full_name = self.cfg.ExpandInstanceName(name)
5824 a987fa48 Guido Trotter
        if full_name is None:
5825 f57c76e4 Iustin Pop
          raise errors.OpPrereqError("Instance '%s' not known" % name)
5826 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
5827 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
5828 a987fa48 Guido Trotter
    else:
5829 a987fa48 Guido Trotter
      self.wanted_names = None
5830 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
5831 a987fa48 Guido Trotter
5832 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
5833 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5834 a987fa48 Guido Trotter
5835 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
5836 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
5837 a987fa48 Guido Trotter
      self._LockInstancesNodes()
5838 a8083063 Iustin Pop
5839 a8083063 Iustin Pop
  def CheckPrereq(self):
5840 a8083063 Iustin Pop
    """Check prerequisites.
5841 a8083063 Iustin Pop

5842 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
5843 a8083063 Iustin Pop

5844 a8083063 Iustin Pop
    """
5845 a987fa48 Guido Trotter
    if self.wanted_names is None:
5846 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5847 a8083063 Iustin Pop
5848 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
5849 a987fa48 Guido Trotter
                             in self.wanted_names]
5850 a987fa48 Guido Trotter
    return
5851 a8083063 Iustin Pop
5852 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
5853 a8083063 Iustin Pop
    """Compute block device status.
5854 a8083063 Iustin Pop

5855 a8083063 Iustin Pop
    """
5856 57821cac Iustin Pop
    static = self.op.static
5857 57821cac Iustin Pop
    if not static:
5858 57821cac Iustin Pop
      self.cfg.SetDiskID(dev, instance.primary_node)
5859 57821cac Iustin Pop
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
5860 9854f5d0 Iustin Pop
      if dev_pstatus.offline:
5861 9854f5d0 Iustin Pop
        dev_pstatus = None
5862 9854f5d0 Iustin Pop
      else:
5863 9854f5d0 Iustin Pop
        msg = dev_pstatus.RemoteFailMsg()
5864 9854f5d0 Iustin Pop
        if msg:
5865 9854f5d0 Iustin Pop
          raise errors.OpExecError("Can't compute disk status for %s: %s" %
5866 9854f5d0 Iustin Pop
                                   (instance.name, msg))
5867 9854f5d0 Iustin Pop
        dev_pstatus = dev_pstatus.payload
5868 57821cac Iustin Pop
    else:
5869 57821cac Iustin Pop
      dev_pstatus = None
5870 57821cac Iustin Pop
5871 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
5872 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
5873 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
5874 a8083063 Iustin Pop
        snode = dev.logical_id[1]
5875 a8083063 Iustin Pop
      else:
5876 a8083063 Iustin Pop
        snode = dev.logical_id[0]
5877 a8083063 Iustin Pop
5878 57821cac Iustin Pop
    if snode and not static:
5879 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
5880 72737a7f Iustin Pop
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
5881 9854f5d0 Iustin Pop
      if dev_sstatus.offline:
5882 9854f5d0 Iustin Pop
        dev_sstatus = None
5883 9854f5d0 Iustin Pop
      else:
5884 9854f5d0 Iustin Pop
        msg = dev_sstatus.RemoteFailMsg()
5885 9854f5d0 Iustin Pop
        if msg:
5886 9854f5d0 Iustin Pop
          raise errors.OpExecError("Can't compute disk status for %s: %s" %
5887 9854f5d0 Iustin Pop
                                   (instance.name, msg))
5888 9854f5d0 Iustin Pop
        dev_sstatus = dev_sstatus.payload
5889 a8083063 Iustin Pop
    else:
5890 a8083063 Iustin Pop
      dev_sstatus = None
5891 a8083063 Iustin Pop
5892 a8083063 Iustin Pop
    if dev.children:
5893 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
5894 a8083063 Iustin Pop
                      for child in dev.children]
5895 a8083063 Iustin Pop
    else:
5896 a8083063 Iustin Pop
      dev_children = []
5897 a8083063 Iustin Pop
5898 a8083063 Iustin Pop
    data = {
5899 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
5900 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
5901 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
5902 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
5903 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
5904 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
5905 a8083063 Iustin Pop
      "children": dev_children,
5906 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
5907 c98162a7 Iustin Pop
      "size": dev.size,
5908 a8083063 Iustin Pop
      }
5909 a8083063 Iustin Pop
5910 a8083063 Iustin Pop
    return data
5911 a8083063 Iustin Pop
5912 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5913 a8083063 Iustin Pop
    """Gather and return data"""
5914 a8083063 Iustin Pop
    result = {}
5915 338e51e8 Iustin Pop
5916 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
5917 338e51e8 Iustin Pop
5918 a8083063 Iustin Pop
    for instance in self.wanted_instances:
5919 57821cac Iustin Pop
      if not self.op.static:
5920 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
5921 57821cac Iustin Pop
                                                  instance.name,
5922 57821cac Iustin Pop
                                                  instance.hypervisor)
5923 781de953 Iustin Pop
        remote_info.Raise()
5924 781de953 Iustin Pop
        remote_info = remote_info.data
5925 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
5926 57821cac Iustin Pop
          remote_state = "up"
5927 57821cac Iustin Pop
        else:
5928 57821cac Iustin Pop
          remote_state = "down"
5929 a8083063 Iustin Pop
      else:
5930 57821cac Iustin Pop
        remote_state = None
5931 0d68c45d Iustin Pop
      if instance.admin_up:
5932 a8083063 Iustin Pop
        config_state = "up"
5933 0d68c45d Iustin Pop
      else:
5934 0d68c45d Iustin Pop
        config_state = "down"
5935 a8083063 Iustin Pop
5936 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
5937 a8083063 Iustin Pop
               for device in instance.disks]
5938 a8083063 Iustin Pop
5939 a8083063 Iustin Pop
      idict = {
5940 a8083063 Iustin Pop
        "name": instance.name,
5941 a8083063 Iustin Pop
        "config_state": config_state,
5942 a8083063 Iustin Pop
        "run_state": remote_state,
5943 a8083063 Iustin Pop
        "pnode": instance.primary_node,
5944 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
5945 a8083063 Iustin Pop
        "os": instance.os,
5946 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
5947 a8083063 Iustin Pop
        "disks": disks,
5948 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
5949 24838135 Iustin Pop
        "network_port": instance.network_port,
5950 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
5951 338e51e8 Iustin Pop
        "hv_actual": cluster.FillHV(instance),
5952 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
5953 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
5954 a8083063 Iustin Pop
        }
5955 a8083063 Iustin Pop
5956 a8083063 Iustin Pop
      result[instance.name] = idict
5957 a8083063 Iustin Pop
5958 a8083063 Iustin Pop
    return result
5959 a8083063 Iustin Pop
5960 a8083063 Iustin Pop
5961 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
5962 a8083063 Iustin Pop
  """Modifies an instances's parameters.
5963 a8083063 Iustin Pop

5964 a8083063 Iustin Pop
  """
5965 a8083063 Iustin Pop
  HPATH = "instance-modify"
5966 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5967 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
5968 1a5c7281 Guido Trotter
  REQ_BGL = False
5969 1a5c7281 Guido Trotter
5970 24991749 Iustin Pop
  def CheckArguments(self):
5971 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
5972 24991749 Iustin Pop
      self.op.nics = []
5973 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
5974 24991749 Iustin Pop
      self.op.disks = []
5975 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
5976 24991749 Iustin Pop
      self.op.beparams = {}
5977 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
5978 24991749 Iustin Pop
      self.op.hvparams = {}
5979 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
5980 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
5981 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
5982 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
5983 24991749 Iustin Pop
5984 24991749 Iustin Pop
    # Disk validation
5985 24991749 Iustin Pop
    disk_addremove = 0
5986 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5987 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5988 24991749 Iustin Pop
        disk_addremove += 1
5989 24991749 Iustin Pop
        continue
5990 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
5991 24991749 Iustin Pop
        disk_addremove += 1
5992 24991749 Iustin Pop
      else:
5993 24991749 Iustin Pop
        if not isinstance(disk_op, int):
5994 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
5995 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
5996 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
5997 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
5998 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
5999 24991749 Iustin Pop
        size = disk_dict.get('size', None)
6000 24991749 Iustin Pop
        if size is None:
6001 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
6002 24991749 Iustin Pop
        try:
6003 24991749 Iustin Pop
          size = int(size)
6004 24991749 Iustin Pop
        except ValueError, err:
6005 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
6006 24991749 Iustin Pop
                                     str(err))
6007 24991749 Iustin Pop
        disk_dict['size'] = size
6008 24991749 Iustin Pop
      else:
6009 24991749 Iustin Pop
        # modification of disk
6010 24991749 Iustin Pop
        if 'size' in disk_dict:
6011 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
6012 24991749 Iustin Pop
                                     " grow-disk")
6013 24991749 Iustin Pop
6014 24991749 Iustin Pop
    if disk_addremove > 1:
6015 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
6016 24991749 Iustin Pop
                                 " supported at a time")
6017 24991749 Iustin Pop
6018 24991749 Iustin Pop
    # NIC validation
6019 24991749 Iustin Pop
    nic_addremove = 0
6020 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6021 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6022 24991749 Iustin Pop
        nic_addremove += 1
6023 24991749 Iustin Pop
        continue
6024 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
6025 24991749 Iustin Pop
        nic_addremove += 1
6026 24991749 Iustin Pop
      else:
6027 24991749 Iustin Pop
        if not isinstance(nic_op, int):
6028 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
6029 24991749 Iustin Pop
6030 24991749 Iustin Pop
      # nic_dict should be a dict
6031 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
6032 24991749 Iustin Pop
      if nic_ip is not None:
6033 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
6034 24991749 Iustin Pop
          nic_dict['ip'] = None
6035 24991749 Iustin Pop
        else:
6036 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
6037 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
6038 5c44da6a Guido Trotter
6039 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
6040 5c44da6a Guido Trotter
        nic_bridge = nic_dict.get('bridge', None)
6041 5c44da6a Guido Trotter
        if nic_bridge is None:
6042 5c44da6a Guido Trotter
          nic_dict['bridge'] = self.cfg.GetDefBridge()
6043 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
6044 5c44da6a Guido Trotter
        if nic_mac is None:
6045 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
6046 5c44da6a Guido Trotter
6047 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6048 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6049 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6050 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
6051 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
6052 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
6053 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
6054 5c44da6a Guido Trotter
                                     " modifying an existing nic")
6055 5c44da6a Guido Trotter
6056 24991749 Iustin Pop
    if nic_addremove > 1:
6057 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
6058 24991749 Iustin Pop
                                 " supported at a time")
6059 24991749 Iustin Pop
6060 1a5c7281 Guido Trotter
  def ExpandNames(self):
6061 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
6062 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
6063 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6064 74409b12 Iustin Pop
6065 74409b12 Iustin Pop
  def DeclareLocks(self, level):
6066 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
6067 74409b12 Iustin Pop
      self._LockInstancesNodes()
6068 a8083063 Iustin Pop
6069 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6070 a8083063 Iustin Pop
    """Build hooks env.
6071 a8083063 Iustin Pop

6072 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
6073 a8083063 Iustin Pop

6074 a8083063 Iustin Pop
    """
6075 396e1b78 Michael Hanselmann
    args = dict()
6076 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
6077 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
6078 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
6079 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
6080 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
6081 d8dcf3c9 Guido Trotter
    # information at all.
6082 d8dcf3c9 Guido Trotter
    if self.op.nics:
6083 d8dcf3c9 Guido Trotter
      args['nics'] = []
6084 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
6085 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
6086 d8dcf3c9 Guido Trotter
        if idx in nic_override:
6087 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
6088 d8dcf3c9 Guido Trotter
        else:
6089 d8dcf3c9 Guido Trotter
          this_nic_override = {}
6090 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
6091 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
6092 d8dcf3c9 Guido Trotter
        else:
6093 d8dcf3c9 Guido Trotter
          ip = nic.ip
6094 d8dcf3c9 Guido Trotter
        if 'bridge' in this_nic_override:
6095 d8dcf3c9 Guido Trotter
          bridge = this_nic_override['bridge']
6096 d8dcf3c9 Guido Trotter
        else:
6097 d8dcf3c9 Guido Trotter
          bridge = nic.bridge
6098 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
6099 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
6100 d8dcf3c9 Guido Trotter
        else:
6101 d8dcf3c9 Guido Trotter
          mac = nic.mac
6102 d8dcf3c9 Guido Trotter
        args['nics'].append((ip, bridge, mac))
6103 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
6104 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
6105 d8dcf3c9 Guido Trotter
        bridge = nic_override[constants.DDM_ADD]['bridge']
6106 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
6107 d8dcf3c9 Guido Trotter
        args['nics'].append((ip, bridge, mac))
6108 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
6109 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
6110 d8dcf3c9 Guido Trotter
6111 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
6112 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6113 a8083063 Iustin Pop
    return env, nl, nl
6114 a8083063 Iustin Pop
6115 a8083063 Iustin Pop
  def CheckPrereq(self):
6116 a8083063 Iustin Pop
    """Check prerequisites.
6117 a8083063 Iustin Pop

6118 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
6119 a8083063 Iustin Pop

6120 a8083063 Iustin Pop
    """
6121 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
6122 a8083063 Iustin Pop
6123 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
6124 31a853d2 Iustin Pop
6125 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6126 1a5c7281 Guido Trotter
    assert self.instance is not None, \
6127 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6128 6b12959c Iustin Pop
    pnode = instance.primary_node
6129 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
6130 74409b12 Iustin Pop
6131 338e51e8 Iustin Pop
    # hvparams processing
6132 74409b12 Iustin Pop
    if self.op.hvparams:
6133 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
6134 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
6135 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
6136 74409b12 Iustin Pop
          try:
6137 74409b12 Iustin Pop
            del i_hvdict[key]
6138 74409b12 Iustin Pop
          except KeyError:
6139 74409b12 Iustin Pop
            pass
6140 74409b12 Iustin Pop
        else:
6141 74409b12 Iustin Pop
          i_hvdict[key] = val
6142 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
6143 a5728081 Guido Trotter
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
6144 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
6145 74409b12 Iustin Pop
                                i_hvdict)
6146 74409b12 Iustin Pop
      # local check
6147 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
6148 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
6149 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
6150 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
6151 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
6152 338e51e8 Iustin Pop
    else:
6153 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
6154 338e51e8 Iustin Pop
6155 338e51e8 Iustin Pop
    # beparams processing
6156 338e51e8 Iustin Pop
    if self.op.beparams:
6157 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
6158 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
6159 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
6160 338e51e8 Iustin Pop
          try:
6161 338e51e8 Iustin Pop
            del i_bedict[key]
6162 338e51e8 Iustin Pop
          except KeyError:
6163 338e51e8 Iustin Pop
            pass
6164 338e51e8 Iustin Pop
        else:
6165 338e51e8 Iustin Pop
          i_bedict[key] = val
6166 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
6167 a5728081 Guido Trotter
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
6168 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
6169 338e51e8 Iustin Pop
                                i_bedict)
6170 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
6171 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
6172 338e51e8 Iustin Pop
    else:
6173 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
6174 74409b12 Iustin Pop
6175 cfefe007 Guido Trotter
    self.warn = []
6176 647a5d80 Iustin Pop
6177 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
6178 647a5d80 Iustin Pop
      mem_check_list = [pnode]
6179 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6180 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
6181 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
6182 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
6183 72737a7f Iustin Pop
                                                  instance.hypervisor)
6184 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
6185 72737a7f Iustin Pop
                                         instance.hypervisor)
6186 781de953 Iustin Pop
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
6187 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
6188 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
6189 cfefe007 Guido Trotter
      else:
6190 781de953 Iustin Pop
        if not instance_info.failed and instance_info.data:
6191 ade0e8cd Guido Trotter
          current_mem = int(instance_info.data['memory'])
6192 cfefe007 Guido Trotter
        else:
6193 cfefe007 Guido Trotter
          # Assume instance not running
6194 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
6195 cfefe007 Guido Trotter
          # and we have no other way to check)
6196 cfefe007 Guido Trotter
          current_mem = 0
6197 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
6198 781de953 Iustin Pop
                    nodeinfo[pnode].data['memory_free'])
6199 cfefe007 Guido Trotter
        if miss_mem > 0:
6200 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
6201 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
6202 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
6203 cfefe007 Guido Trotter
6204 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6205 ea33068f Iustin Pop
        for node, nres in nodeinfo.iteritems():
6206 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
6207 ea33068f Iustin Pop
            continue
6208 781de953 Iustin Pop
          if nres.failed or not isinstance(nres.data, dict):
6209 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
6210 781de953 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
6211 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
6212 647a5d80 Iustin Pop
                             " secondary node %s" % node)
6213 5bc84f33 Alexander Schreiber
6214 24991749 Iustin Pop
    # NIC processing
6215 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6216 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6217 24991749 Iustin Pop
        if not instance.nics:
6218 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
6219 24991749 Iustin Pop
        continue
6220 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
6221 24991749 Iustin Pop
        # an existing nic
6222 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
6223 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
6224 24991749 Iustin Pop
                                     " are 0 to %d" %
6225 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
6226 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
6227 5c44da6a Guido Trotter
        nic_bridge = nic_dict['bridge']
6228 5c44da6a Guido Trotter
        if nic_bridge is None:
6229 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic bridge to None')
6230 24991749 Iustin Pop
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
6231 24991749 Iustin Pop
          msg = ("Bridge '%s' doesn't exist on one of"
6232 24991749 Iustin Pop
                 " the instance nodes" % nic_bridge)
6233 24991749 Iustin Pop
          if self.force:
6234 24991749 Iustin Pop
            self.warn.append(msg)
6235 24991749 Iustin Pop
          else:
6236 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
6237 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6238 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6239 5c44da6a Guido Trotter
        if nic_mac is None:
6240 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic mac to None')
6241 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6242 5c44da6a Guido Trotter
          # otherwise generate the mac
6243 5c44da6a Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC()
6244 5c44da6a Guido Trotter
        else:
6245 5c44da6a Guido Trotter
          # or validate/reserve the current one
6246 5c44da6a Guido Trotter
          if self.cfg.IsMacInUse(nic_mac):
6247 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
6248 5c44da6a Guido Trotter
                                       " in cluster" % nic_mac)
6249 24991749 Iustin Pop
6250 24991749 Iustin Pop
    # DISK processing
6251 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
6252 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
6253 24991749 Iustin Pop
                                 " diskless instances")
6254 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6255 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6256 24991749 Iustin Pop
        if len(instance.disks) == 1:
6257 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
6258 24991749 Iustin Pop
                                     " an instance")
6259 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
6260 24991749 Iustin Pop
        ins_l = ins_l[pnode]
6261 4cfb9426 Iustin Pop
        if ins_l.failed or not isinstance(ins_l.data, list):
6262 24991749 Iustin Pop
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
6263 4cfb9426 Iustin Pop
        if instance.name in ins_l.data:
6264 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
6265 24991749 Iustin Pop
                                     " disks.")
6266 24991749 Iustin Pop
6267 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
6268 24991749 Iustin Pop
          len(instance.nics) >= constants.MAX_DISKS):
6269 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
6270 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
6271 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
6272 24991749 Iustin Pop
        # an existing disk
6273 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
6274 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
6275 24991749 Iustin Pop
                                     " are 0 to %d" %
6276 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
6277 24991749 Iustin Pop
6278 a8083063 Iustin Pop
    return
6279 a8083063 Iustin Pop
6280 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    @type feedback_fn: callable
    @param feedback_fn: function used to report the warnings collected
        during CheckPrereq back to the caller
    @rtype: list
    @return: list of (parameter, new value) pairs describing the
        changes that were applied

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk; the index of the removed disk is the
        # length of the list *after* the pop
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        # tear down the device on every node that holds a piece of it
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
          if msg:
            # best-effort removal: warn but keep going so the config
            # update below still happens
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          # file-based disks live next to the existing first disk
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          # only the primary node gets the device fully created/assembled
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            # creation failures are reported but not fatal; the disk is
            # already registered in the instance object
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk: disk_op is the index, only the access
        # mode can be modified
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set, by now (filled in CheckPrereq)
        mac = nic_dict['mac']
        bridge = nic_dict['bridge']
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
                              bridge=bridge)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,bridge=%s" %
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
      else:
        # change a given nic: copy over only the keys that were passed
        for key in 'mac', 'ip', 'bridge':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))

    # hvparams changes; self.hv_inst is the defaults-free dict computed
    # and validated in CheckPrereq
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes; self.be_inst likewise comes from CheckPrereq
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # persist all the in-memory modifications made above
    self.cfg.Update(instance)

    return result
6382 a8083063 Iustin Pop
6383 a8083063 Iustin Pop
6384 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    # We only read export information, so the node locks can be shared.
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      wanted = _GetWantedNodes(self, self.op.nodes)
    else:
      # no explicit node list: query the whole cluster
      wanted = locking.ALL_SET
    self.needed_locks[locking.LEVEL_NODE] = wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the node list is whatever we managed to lock
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node; nodes whose RPC call failed map to False

    """
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node, nres in rpcresult.iteritems():
      if nres.failed:
        result[node] = False
      else:
        result[node] = nres.data
    return result
6424 a8083063 Iustin Pop
6425 a8083063 Iustin Pop
6426 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have do lock all nodes, as we don't know where
    # the previous export might be, and and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    @rtype: tuple
    @return: (env dict, pre-hook node list, post-hook node list)

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    # the instance was locked in ExpandNames, so it must still exist
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    # the destination must be both online and not drained, since we
    # will write the export data there
    _CheckNodeOnline(self, self.dst_node.name)
    _CheckNodeNotDrained(self, self.dst_node.name)

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    @rtype: tuple
    @return: (finalize_result, per_disk_results) where finalize_result
        is a boolean for the finalize_export RPC and per_disk_results
        is a list of booleans, one per instance disk

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, src_node, msg))

    vgname = self.cfg.GetVGName()

    # per-disk snapshot devices; False marks a disk we failed to snapshot
    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    # per-disk results
    dresults = []
    try:
      for idx, disk in enumerate(instance.disks):
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if new_dev_name.failed or not new_dev_name.data:
          self.LogWarning("Could not snapshot disk/%d on node %s",
                          idx, src_node)
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name.data),
                                 physical_id=(vgname, new_dev_name.data),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      # restart the instance even if the snapshot loop raised, but only
      # if we shut it down ourselves and it is configured to be running
      if self.op.shutdown and instance.admin_up:
        # NOTE(review): the two None arguments are extra start
        # parameters — presumably hv/be overrides; confirm against
        # call_instance_start's signature
        result = self.rpc.call_instance_start(src_node, instance, None, None)
        msg = result.RemoteFailMsg()
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        # copy the snapshot to the destination node, then delete it
        # from the source regardless of the copy result
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        if result.failed or not result.data:
          self.LogWarning("Could not export disk/%d from node %s to"
                          " node %s", idx, src_node, dst_node.name)
          dresults.append(False)
        else:
          dresults.append(True)
        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
        if msg:
          self.LogWarning("Could not remove snapshot for disk/%d from node"
                          " %s: %s", idx, src_node, msg)
      else:
        # snapshot failed earlier, so this disk was not exported
        dresults.append(False)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    fin_resu = True
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)
      fin_resu = False

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          # NOTE(review): this truthiness check is inconsistent with the
          # `.failed or not .data` pattern used elsewhere (e.g.
          # LURemoveExport); an RPC result object may always be truthy,
          # which would make this branch unreachable — confirm
          if not self.rpc.call_export_remove(node, instance.name):
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)
    return fin_resu, dresults
6586 5c947f38 Iustin Pop
6587 5c947f38 Iustin Pop
6588 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      nres = exportlist[node]
      if nres.failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if instance_name not in nres.data:
        continue
      found = True
      result = self.rpc.call_export_remove(node, instance_name)
      if result.failed or not result.data:
        logging.error("Could not remove export for instance %s"
                      " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
6637 9ac99fda Guido Trotter
6638 9ac99fda Guido Trotter
6639 fe267188 Iustin Pop
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    # Only node and instance tags need a lock on their object; cluster
    # tags need no locking at this level.
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif self.op.kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # resolve the target object according to the requested tag kind
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
6676 5c947f38 Iustin Pop
6677 5c947f38 Iustin Pop
6678 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Return the tags of the target object as a list.

    """
    current_tags = self.target.GetTags()
    return list(current_tags)
6690 5c947f38 Iustin Pop
6691 5c947f38 Iustin Pop
6692 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
6693 73415719 Iustin Pop
  """Searches the tags for a given pattern.
6694 73415719 Iustin Pop

6695 73415719 Iustin Pop
  """
6696 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
6697 8646adce Guido Trotter
  REQ_BGL = False
6698 8646adce Guido Trotter
6699 8646adce Guido Trotter
  def ExpandNames(self):
6700 8646adce Guido Trotter
    self.needed_locks = {}
6701 73415719 Iustin Pop
6702 73415719 Iustin Pop
  def CheckPrereq(self):
6703 73415719 Iustin Pop
    """Check prerequisites.
6704 73415719 Iustin Pop

6705 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
6706 73415719 Iustin Pop

6707 73415719 Iustin Pop
    """
6708 73415719 Iustin Pop
    try:
6709 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
6710 73415719 Iustin Pop
    except re.error, err:
6711 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6712 73415719 Iustin Pop
                                 (self.op.pattern, err))
6713 73415719 Iustin Pop
6714 73415719 Iustin Pop
  def Exec(self, feedback_fn):
6715 73415719 Iustin Pop
    """Returns the tag list.
6716 73415719 Iustin Pop

6717 73415719 Iustin Pop
    """
6718 73415719 Iustin Pop
    cfg = self.cfg
6719 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
6720 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
6721 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6722 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
6723 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6724 73415719 Iustin Pop
    results = []
6725 73415719 Iustin Pop
    for path, target in tgts:
6726 73415719 Iustin Pop
      for tag in target.GetTags():
6727 73415719 Iustin Pop
        if self.re.search(tag):
6728 73415719 Iustin Pop
          results.append((path, tag))
6729 73415719 Iustin Pop
    return results
6730 73415719 Iustin Pop
6731 73415719 Iustin Pop
6732 f27302fa Iustin Pop
class LUAddTags(TagsLU):
6733 5c947f38 Iustin Pop
  """Sets a tag on a given object.
6734 5c947f38 Iustin Pop

6735 5c947f38 Iustin Pop
  """
6736 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6737 8646adce Guido Trotter
  REQ_BGL = False
6738 5c947f38 Iustin Pop
6739 5c947f38 Iustin Pop
  def CheckPrereq(self):
6740 5c947f38 Iustin Pop
    """Check prerequisites.
6741 5c947f38 Iustin Pop

6742 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
6743 5c947f38 Iustin Pop

6744 5c947f38 Iustin Pop
    """
6745 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6746 f27302fa Iustin Pop
    for tag in self.op.tags:
6747 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6748 5c947f38 Iustin Pop
6749 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6750 5c947f38 Iustin Pop
    """Sets the tag.
6751 5c947f38 Iustin Pop

6752 5c947f38 Iustin Pop
    """
6753 5c947f38 Iustin Pop
    try:
6754 f27302fa Iustin Pop
      for tag in self.op.tags:
6755 f27302fa Iustin Pop
        self.target.AddTag(tag)
6756 5c947f38 Iustin Pop
    except errors.TagError, err:
6757 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6758 5c947f38 Iustin Pop
    try:
6759 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6760 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6761 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6762 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6763 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6764 5c947f38 Iustin Pop
6765 5c947f38 Iustin Pop
6766 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that every requested tag is valid and currently
    present on the target object.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    wanted = frozenset(self.op.tags)
    existing = self.target.GetTags()
    if not wanted <= existing:
      # report the tags that are not on the object, sorted for stable output
      missing = sorted("'%s'" % tag for tag in wanted - existing)
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(missing)))

  def Exec(self, feedback_fn):
    """Remove the tags from the object and persist the configuration.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
6803 06009e27 Iustin Pop
6804 0eed6e61 Guido Trotter
6805 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """No prerequisites for a delay test.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if not self.op.on_nodes:
      return
    result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
    if not result:
      raise errors.OpExecError("Complete failure from rpc call")
    for node, node_result in result.items():
      node_result.Raise()
      if not node_result.data:
        raise errors.OpExecError("Failure during rpc call to node %s,"
                                 " result: %s" % (node, node_result.data))
6850 d61df03e Iustin Pop
6851 d61df03e Iustin Pop
6852 d1c2dd75 Iustin Pop
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # keyword arguments required for an 'allocate' request
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  # keyword arguments required for a 'relocate' request
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, lu, mode, name, **kwargs):
    """Initialize the allocator and build the complete input data.

    @param lu: the LogicalUnit on whose behalf we run (provides cfg and rpc)
    @param mode: constants.IALLOCATOR_MODE_ALLOC or _MODE_RELOC
    @param name: the name of the instance the request is about
    @param kwargs: exactly the keys listed in _ALLO_KEYS or _RELO_KEYS
        (depending on mode); a missing or unknown key is a programmer error

    """
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # check both directions: no unknown keys, and no missing keys
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.
    Fills self.in_data with the cluster, node and instance information
    gathered from the configuration and from RPC calls to the nodes.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    # pair each instance with its filled-in backend parameters
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    # NOTE: mode was validated in __init__, so one of these branches
    # always assigns hypervisor_name
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor_name)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      # dynamic (RPC-based) data is only gathered for usable nodes;
      # offline/drained nodes keep just the static entries above
      if not (ninfo.offline or ninfo.drained):
        nresult.Raise()
        if not isinstance(nresult.data, dict):
          raise errors.OpExecError("Can't get data for node %s" % nname)
        remote_info = nresult.data
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          try:
            remote_info[attr] = int(remote_info[attr])
          except ValueError, err:
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" % (nname, attr, err))
        # compute memory used by primary instances
        # NOTE: the loop variable iinfo deliberately shadows the
        # instance list built above; i_list is iterated instead
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].data:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
            # charge the node for memory an instance is entitled to but
            # not currently using (ballooning/down instances)
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    # mirrored templates need a secondary node as well
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # relocation only makes sense for mirrored instances with exactly
    # one secondary node (the one being replaced)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    Computes the cluster-wide data, adds the per-mode request and
    serializes everything into self.in_text.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    @param name: the name of the allocator script to run
    @param validate: whether to parse and validate the script's output
        (see _ValidateResult)
    @param call_fn: override for the RPC call, used for testing;
        defaults to the iallocator runner on the master node

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    # the runner is expected to return (rcode, stdout, stderr, fail)
    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # copy the mandatory result keys onto attributes of the same name
    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
7150 538475ca Iustin Pop
7151 538475ca Iustin Pop
7152 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Exercise the instance allocator framework.

  This LU feeds a test request through the iallocator machinery,
  either returning the generated input text or running an external
  allocator script on it.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    Validates the opcode parameters according to the requested test
    direction and allocator mode.

    """
    op = self.op
    if op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # a simulated allocation needs the full instance specification
      for attr in ("name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"):
        if not hasattr(op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for nic in op.nics:
        nic_ok = (isinstance(nic, dict) and
                  "mac" in nic and
                  "ip" in nic and
                  "bridge" in nic)
        if not nic_ok:
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for disk in op.disks:
        disk_ok = (isinstance(disk, dict) and
                   "size" in disk and
                   isinstance(disk["size"], int) and
                   "mode" in disk and
                   disk["mode"] in ['r', 'w'])
        if not disk_ok:
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if getattr(op, "hypervisor", None) is None:
        # no hypervisor given: fall back to the cluster-wide default
        op.hypervisor = self.cfg.GetHypervisorType()
    elif op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   op.name)
      op.name = fname
      # relocation moves the instance away from its current secondaries
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 op.mode)

    if op.direction == constants.IALLOCATOR_DIR_OUT:
      # the "out" direction actually invokes an external allocator script
      if getattr(op, "allocator", None) is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    Builds the IAllocator request from the opcode and either returns
    the request text ("in" direction) or the allocator's output
    ("out" direction).

    """
    kwargs = {
      "mode": self.op.mode,
      "name": self.op.name,
      }
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      kwargs.update(mem_size=self.op.mem_size,
                    disks=self.op.disks,
                    disk_template=self.op.disk_template,
                    os=self.op.os,
                    tags=self.op.tags,
                    nics=self.op.nics,
                    vcpus=self.op.vcpus,
                    hypervisor=self.op.hypervisor)
    else:
      kwargs["relocate_from"] = list(self.relocate_from)
    ial = IAllocator(self, **kwargs)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      return ial.in_text
    ial.Run(self.op.allocator, validate=False)
    return ial.out_text