root / lib / cmdlib.py @ 4b7735f9

#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform
import logging
import copy
import random

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer need to worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's a need to calculate some locks after having acquired
    the previous ones. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes to return, use an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged, but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instances' nodes, or
    to just lock primary or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


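# ---------------------------------------------------------------------------
# Editor's note: the sketch below is illustrative only and is NOT part of the
# original cmdlib.py.  It shows how the LogicalUnit contract described above
# fits together for a hypothetical concurrent LU; the opcode and its
# "instance_name" field are invented for the example.  NoHooksLU (defined
# just below) is used as the base class so that BuildHooksEnv is not needed.
#
#   class LUExampleCheckInstance(NoHooksLU):
#     """Example LU: lock one instance (shared) and report its primary."""
#     _OP_REQP = ["instance_name"]
#     REQ_BGL = False              # concurrent LU, so ExpandNames must lock
#
#     def ExpandNames(self):
#       self._ExpandAndLockInstance()
#       self.share_locks[locking.LEVEL_INSTANCE] = 1
#
#     def CheckPrereq(self):
#       self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
#
#     def Exec(self, feedback_fn):
#       feedback_fn("%s runs on %s" %
#                   (self.instance.name, self.instance.primary_node))
# ---------------------------------------------------------------------------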
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @type selected: list
  @param selected: the list of output fields to check

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))


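# Editor's note (illustrative only, not part of the original module): a
# typical caller builds the two field sets from the field names it knows
# about and passes the user-requested fields straight through, e.g.::
#
#   _CheckOutputFields(static=utils.FieldSet("name", "pinst_cnt"),
#                      dynamic=utils.FieldSet("dtotal", "dfree"),
#                      selected=self.op.output_fields)
#
# The field names above are hypothetical; any requested field outside the
# union of the two sets raises errors.OpPrereqError.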
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, bridge, mac) representing
      the NICs the instance has
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env


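# Editor's note (illustrative only, not part of the original module): for a
# hypothetical instance "web1.example.com" with one NIC, the dict built above
# would look like::
#
#   {
#     "OP_TARGET": "web1.example.com",
#     "INSTANCE_NAME": "web1.example.com",
#     "INSTANCE_PRIMARY": "node1.example.com",
#     "INSTANCE_SECONDARIES": "node2.example.com",
#     "INSTANCE_OS_TYPE": "debian-etch",
#     "INSTANCE_STATUS": "up",
#     "INSTANCE_MEMORY": 128,
#     "INSTANCE_VCPUS": 1,
#     "INSTANCE_NIC0_IP": "",
#     "INSTANCE_NIC0_BRIDGE": "xen-br0",
#     "INSTANCE_NIC0_HWADDR": "aa:00:00:12:34:56",
#     "INSTANCE_NIC_COUNT": 1,
#   }
#
# The hooks runner later prefixes every key with "GANETI_".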
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not lu.rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list::

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type node: string
    @param node: the name of the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @type vglist: dict
    @param vglist: dictionary of volume group names and their size
    @param node_result: the results from the node
    @param remote_version: the RPC version from the remote node
    @param feedback_fn: function used to accumulate results

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    if not node_result:
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if (node_current not in node_instance or
          not instance in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to take over,
      # should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

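  # Editor's note (illustrative only, not part of the original module): with
  # the hypothetical layout
  #   node A: primary for inst1 (1024 MB) and inst2 (512 MB),
  #   node B: secondary for both, mfree = 1200 MB,
  # the check above flags node B, because a failure of A would require
  # 1024 + 512 = 1536 MB on B while only 1200 MB are free.
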
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run only in the post phase, and their failure
    causes the output to be logged in the verify output and the verification
    to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = []
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors)
    all_vglist = self.rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': hypervisors,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    all_rversion = self.rpc.call_version(nodelist)
    all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
                                        self.cfg.GetHypervisorType())

    cluster = self.cfg.GetClusterInfo()
    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
871 9c9c7d30 Guido Trotter
        node_info[node] = {
872 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
873 9c9c7d30 Guido Trotter
          "dfree": int(nodeinfo['vg_free']),
874 93e4c50b Guido Trotter
          "pinst": [],
875 93e4c50b Guido Trotter
          "sinst": [],
876 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
877 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
878 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
879 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
880 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
881 36e7da50 Guido Trotter
          # secondary.
882 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
883 9c9c7d30 Guido Trotter
        }
884 9c9c7d30 Guido Trotter
      except ValueError:
885 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
886 9c9c7d30 Guido Trotter
        bad = True
887 9c9c7d30 Guido Trotter
        continue
888 9c9c7d30 Guido Trotter
889 a8083063 Iustin Pop
    node_vol_should = {}
890 a8083063 Iustin Pop
891 a8083063 Iustin Pop
    for instance in instancelist:
892 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
893 a8083063 Iustin Pop
      inst_config = self.cfg.GetInstanceInfo(instance)
894 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
895 c5705f58 Guido Trotter
                                     node_instance, feedback_fn)
896 c5705f58 Guido Trotter
      bad = bad or result
897 a8083063 Iustin Pop
898 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
899 a8083063 Iustin Pop
900 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
901 26b6af5e Guido Trotter
902 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
903 93e4c50b Guido Trotter
      if pnode in node_info:
904 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
905 93e4c50b Guido Trotter
      else:
906 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
907 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
908 93e4c50b Guido Trotter
        bad = True
909 93e4c50b Guido Trotter
910 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
911 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
912 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
913 93e4c50b Guido Trotter
      # supported either.
914 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
915 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
916 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
917 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
918 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
919 93e4c50b Guido Trotter
                    % instance)
920 93e4c50b Guido Trotter
921 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
922 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
923 3924700f Iustin Pop
924 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
925 93e4c50b Guido Trotter
        if snode in node_info:
926 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
927 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
928 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
929 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
930 93e4c50b Guido Trotter
        else:
931 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
932 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
933 93e4c50b Guido Trotter
934 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
935 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
936 a8083063 Iustin Pop
                                       feedback_fn)
937 a8083063 Iustin Pop
    bad = bad or result
938 a8083063 Iustin Pop
939 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
940 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
941 a8083063 Iustin Pop
                                         feedback_fn)
942 a8083063 Iustin Pop
    bad = bad or result
943 a8083063 Iustin Pop
944 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
945 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
946 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
947 e54c4c5e Guido Trotter
      bad = bad or result
948 2b3b6ddd Guido Trotter
949 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
950 2b3b6ddd Guido Trotter
    if i_non_redundant:
951 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
952 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
953 2b3b6ddd Guido Trotter
954 3924700f Iustin Pop
    if i_non_a_balanced:
955 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
956 3924700f Iustin Pop
                  % len(i_non_a_balanced))
957 3924700f Iustin Pop
958 34290825 Michael Hanselmann
    return not bad
959 a8083063 Iustin Pop
960 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
961 e4376078 Iustin Pop
    """Analize the post-hooks' result
962 e4376078 Iustin Pop

963 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
964 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
965 d8fff41c Guido Trotter

966 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
967 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
968 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
969 e4376078 Iustin Pop
    @param feedback_fn: function used to send feedback back to the caller
970 e4376078 Iustin Pop
    @param lu_result: previous Exec result
971 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
972 e4376078 Iustin Pop
        and hook results
973 d8fff41c Guido Trotter

974 d8fff41c Guido Trotter
    """
975 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
976 38206f3c Iustin Pop
    # their results
977 d8fff41c Guido Trotter
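    # Illustrative (hypothetical) shape of hooks_results, inferred from how
    # it is consumed below: node name -> False on communication failure,
    # otherwise a list of (script, status, output) tuples, e.g.
    #   {"node1.example.com": [("10-check", constants.HKR_FAIL, "boom")],
    #    "node2.example.com": False}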
    if phase == constants.HOOKS_PHASE_POST:
978 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
979 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
980 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
981 d8fff41c Guido Trotter
      if not hooks_results:
982 d8fff41c Guido Trotter
        feedback_fn("  - ERROR: general communication failure")
983 d8fff41c Guido Trotter
        lu_result = 1
984 d8fff41c Guido Trotter
      else:
985 d8fff41c Guido Trotter
        for node_name in hooks_results:
986 d8fff41c Guido Trotter
          show_node_header = True
987 d8fff41c Guido Trotter
          res = hooks_results[node_name]
988 d8fff41c Guido Trotter
          if res is False or not isinstance(res, list):
989 d8fff41c Guido Trotter
            feedback_fn("    Communication failure")
990 d8fff41c Guido Trotter
            lu_result = 1
991 d8fff41c Guido Trotter
            continue
992 d8fff41c Guido Trotter
          for script, hkr, output in res:
993 d8fff41c Guido Trotter
            if hkr == constants.HKR_FAIL:
994 d8fff41c Guido Trotter
              # The node header is only shown once, if there are
995 d8fff41c Guido Trotter
              # failing hooks on that node
996 d8fff41c Guido Trotter
              if show_node_header:
997 d8fff41c Guido Trotter
                feedback_fn("  Node %s:" % node_name)
998 d8fff41c Guido Trotter
                show_node_header = False
999 d8fff41c Guido Trotter
              feedback_fn("    ERROR: Script %s failed, output:" % script)
1000 d8fff41c Guido Trotter
              output = indent_re.sub('      ', output)
1001 d8fff41c Guido Trotter
              feedback_fn("%s" % output)
1002 d8fff41c Guido Trotter
              lu_result = 1
1003 d8fff41c Guido Trotter
1004 d8fff41c Guido Trotter
      return lu_result
1005 d8fff41c Guido Trotter
1006 a8083063 Iustin Pop
1007 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1008 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1009 2c95a8d4 Iustin Pop

1010 2c95a8d4 Iustin Pop
  """
1011 2c95a8d4 Iustin Pop
  _OP_REQP = []
1012 d4b9d97f Guido Trotter
  REQ_BGL = False
1013 d4b9d97f Guido Trotter
1014 d4b9d97f Guido Trotter
  def ExpandNames(self):
1015 d4b9d97f Guido Trotter
    self.needed_locks = {
1016 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1017 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1018 d4b9d97f Guido Trotter
    }
1019 d4b9d97f Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1020 2c95a8d4 Iustin Pop
1021 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1022 2c95a8d4 Iustin Pop
    """Check prerequisites.
1023 2c95a8d4 Iustin Pop

1024 2c95a8d4 Iustin Pop
    This has no prerequisites.
1025 2c95a8d4 Iustin Pop

1026 2c95a8d4 Iustin Pop
    """
1027 2c95a8d4 Iustin Pop
    pass
1028 2c95a8d4 Iustin Pop
1029 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1030 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1031 2c95a8d4 Iustin Pop

1032 2c95a8d4 Iustin Pop
    """
1033 b63ed789 Iustin Pop
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
1034 2c95a8d4 Iustin Pop
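    # result is a 4-tuple: nodes we could not query, per-node LVM error
    # messages, instances with at least one offline LV, and per-instance
    # lists of missing (node, LV) pairs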
1035 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1036 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1037 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1038 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1039 2c95a8d4 Iustin Pop
1040 2c95a8d4 Iustin Pop
    nv_dict = {}
1041 2c95a8d4 Iustin Pop
    for inst in instances:
1042 2c95a8d4 Iustin Pop
      inst_lvs = {}
1043 2c95a8d4 Iustin Pop
      if (inst.status != "up" or
1044 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1045 2c95a8d4 Iustin Pop
        continue
1046 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1047 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1048 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1049 2c95a8d4 Iustin Pop
        for vol in vol_list:
1050 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1051 2c95a8d4 Iustin Pop
1052 2c95a8d4 Iustin Pop
    if not nv_dict:
1053 2c95a8d4 Iustin Pop
      return result
1054 2c95a8d4 Iustin Pop
1055 72737a7f Iustin Pop
    node_lvs = self.rpc.call_volume_list(nodes, vg_name)
1056 2c95a8d4 Iustin Pop
1057 2c95a8d4 Iustin Pop
    to_act = set()
1058 2c95a8d4 Iustin Pop
    for node in nodes:
1059 2c95a8d4 Iustin Pop
      # node_volume
1060 2c95a8d4 Iustin Pop
      lvs = node_lvs[node]
1061 2c95a8d4 Iustin Pop
1062 b63ed789 Iustin Pop
      if isinstance(lvs, basestring):
1063 9a4f63d1 Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
1064 b63ed789 Iustin Pop
        res_nlvm[node] = lvs
1065 b63ed789 Iustin Pop
      elif not isinstance(lvs, dict):
1066 9a4f63d1 Iustin Pop
        logging.warning("Connection to node %s failed or invalid data"
1067 9a4f63d1 Iustin Pop
                        " returned", node)
1068 2c95a8d4 Iustin Pop
        res_nodes.append(node)
1069 2c95a8d4 Iustin Pop
        continue
1070 2c95a8d4 Iustin Pop
1071 2c95a8d4 Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
1072 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1073 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1074 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1075 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1076 2c95a8d4 Iustin Pop
1077 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1078 b63ed789 Iustin Pop
    # data better
1079 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1080 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1081 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1082 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1083 b63ed789 Iustin Pop
1084 2c95a8d4 Iustin Pop
    return result
1085 2c95a8d4 Iustin Pop
1086 2c95a8d4 Iustin Pop
1087 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1088 07bd8a51 Iustin Pop
  """Rename the cluster.
1089 07bd8a51 Iustin Pop

1090 07bd8a51 Iustin Pop
  """
1091 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1092 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1093 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1094 07bd8a51 Iustin Pop
1095 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1096 07bd8a51 Iustin Pop
    """Build hooks env.
1097 07bd8a51 Iustin Pop

1098 07bd8a51 Iustin Pop
    """
1099 07bd8a51 Iustin Pop
    env = {
1100 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1101 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1102 07bd8a51 Iustin Pop
      }
1103 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1104 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1105 07bd8a51 Iustin Pop
1106 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1107 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1108 07bd8a51 Iustin Pop

1109 07bd8a51 Iustin Pop
    """
1110 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1111 07bd8a51 Iustin Pop
1112 bcf043c9 Iustin Pop
    new_name = hostname.name
1113 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1114 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
1115 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
1116 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1117 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1118 07bd8a51 Iustin Pop
                                 " cluster has changed")
1119 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1120 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1121 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1122 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1123 07bd8a51 Iustin Pop
                                   new_ip)
1124 07bd8a51 Iustin Pop
1125 07bd8a51 Iustin Pop
    self.op.name = new_name
1126 07bd8a51 Iustin Pop
1127 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1128 07bd8a51 Iustin Pop
    """Rename the cluster.
1129 07bd8a51 Iustin Pop

1130 07bd8a51 Iustin Pop
    """
1131 07bd8a51 Iustin Pop
    clustername = self.op.name
1132 07bd8a51 Iustin Pop
    ip = self.ip
1133 07bd8a51 Iustin Pop
1134 07bd8a51 Iustin Pop
    # shutdown the master IP
1135 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
1136 72737a7f Iustin Pop
    if not self.rpc.call_node_stop_master(master, False):
1137 07bd8a51 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
1138 07bd8a51 Iustin Pop
1139 07bd8a51 Iustin Pop
    try:
1140 07bd8a51 Iustin Pop
      # modify the sstore
1141 d6a02168 Michael Hanselmann
      # TODO: sstore
1142 07bd8a51 Iustin Pop
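      # NOTE: 'ss' is not defined anywhere in this module; the TODO above
      # tracks porting this block to the current ssconf interface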
      ss.SetKey(ss.SS_MASTER_IP, ip)
1143 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1144 07bd8a51 Iustin Pop
1145 07bd8a51 Iustin Pop
      # Distribute updated ss config to all nodes
1146 07bd8a51 Iustin Pop
      myself = self.cfg.GetNodeInfo(master)
1147 07bd8a51 Iustin Pop
      dist_nodes = self.cfg.GetNodeList()
1148 07bd8a51 Iustin Pop
      if myself.name in dist_nodes:
1149 07bd8a51 Iustin Pop
        dist_nodes.remove(myself.name)
1150 07bd8a51 Iustin Pop
1151 9a4f63d1 Iustin Pop
      logging.debug("Copying updated ssconf data to all nodes")
1152 07bd8a51 Iustin Pop
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1153 07bd8a51 Iustin Pop
        fname = ss.KeyToFilename(keyname)
1154 72737a7f Iustin Pop
        result = self.rpc.call_upload_file(dist_nodes, fname)
1155 07bd8a51 Iustin Pop
        for to_node in dist_nodes:
1156 07bd8a51 Iustin Pop
          if not result[to_node]:
1157 86d9d3bb Iustin Pop
            self.LogWarning("Copy of file %s to node %s failed",
1158 86d9d3bb Iustin Pop
                            fname, to_node)
1159 07bd8a51 Iustin Pop
    finally:
1160 72737a7f Iustin Pop
      if not self.rpc.call_node_start_master(master, False):
1161 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
1162 86d9d3bb Iustin Pop
                        " the master, please restart manually.")
1163 07bd8a51 Iustin Pop
1164 07bd8a51 Iustin Pop
1165 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1166 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1167 8084f9f6 Manuel Franceschini

1168 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
1169 e4376078 Iustin Pop
  @param disk: the disk to check
1170 e4376078 Iustin Pop
  @rtype: boolean
1171 e4376078 Iustin Pop
  @return: boolean indicating whether an LD_LV dev_type was found or not
1172 8084f9f6 Manuel Franceschini

1173 8084f9f6 Manuel Franceschini
  """
1174 8084f9f6 Manuel Franceschini
  if disk.children:
1175 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1176 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1177 8084f9f6 Manuel Franceschini
        return True
1178 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
1179 8084f9f6 Manuel Franceschini
1180 8084f9f6 Manuel Franceschini
1181 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1182 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1183 8084f9f6 Manuel Franceschini

1184 8084f9f6 Manuel Franceschini
  """
1185 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1186 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1187 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1188 c53279cf Guido Trotter
  REQ_BGL = False
1189 c53279cf Guido Trotter
1190 4b7735f9 Iustin Pop
  def CheckParameters(self):
1191 4b7735f9 Iustin Pop
    """Check parameters
1192 4b7735f9 Iustin Pop

1193 4b7735f9 Iustin Pop
    """
1194 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
1195 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
1196 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1197 4b7735f9 Iustin Pop
      try:
1198 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1199 4b7735f9 Iustin Pop
      except ValueError, err:
1200 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1201 4b7735f9 Iustin Pop
                                   str(err))
1202 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
1203 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed")
1204 4b7735f9 Iustin Pop
1205 c53279cf Guido Trotter
  def ExpandNames(self):
1206 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
1207 c53279cf Guido Trotter
    # all nodes to be modified.
1208 c53279cf Guido Trotter
    self.needed_locks = {
1209 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1210 c53279cf Guido Trotter
    }
1211 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1212 8084f9f6 Manuel Franceschini
1213 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1214 8084f9f6 Manuel Franceschini
    """Build hooks env.
1215 8084f9f6 Manuel Franceschini

1216 8084f9f6 Manuel Franceschini
    """
1217 8084f9f6 Manuel Franceschini
    env = {
1218 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1219 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1220 8084f9f6 Manuel Franceschini
      }
1221 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1222 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1223 8084f9f6 Manuel Franceschini
1224 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1225 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1226 8084f9f6 Manuel Franceschini

1227 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1228 5f83e263 Iustin Pop
    if the given volume group is valid.
1229 8084f9f6 Manuel Franceschini

1230 8084f9f6 Manuel Franceschini
    """
1231 c53279cf Guido Trotter
    # FIXME: This only works because there is only one parameter that can be
1232 c53279cf Guido Trotter
    # changed or removed.
1233 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
1234 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1235 8084f9f6 Manuel Franceschini
      for inst in instances:
1236 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1237 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1238 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1239 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1240 8084f9f6 Manuel Franceschini
1241 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1242 779c15bb Iustin Pop
1243 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1244 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1245 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
1246 8084f9f6 Manuel Franceschini
      for node in node_list:
1247 8d1a2a64 Michael Hanselmann
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
1248 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1249 8084f9f6 Manuel Franceschini
        if vgstatus:
1250 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1251 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1252 8084f9f6 Manuel Franceschini
1253 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
1254 779c15bb Iustin Pop
    # beparams changes do not need validation (we can't validate?),
1255 779c15bb Iustin Pop
    # but we still process here
1256 779c15bb Iustin Pop
    if self.op.beparams:
1257 779c15bb Iustin Pop
      self.new_beparams = cluster.FillDict(
1258 779c15bb Iustin Pop
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
1259 779c15bb Iustin Pop
1260 779c15bb Iustin Pop
    # hypervisor list/parameters
1261 779c15bb Iustin Pop
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
1262 779c15bb Iustin Pop
    if self.op.hvparams:
1263 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
1264 779c15bb Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1265 779c15bb Iustin Pop
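      # merge the per-hypervisor overrides into the current defaults; for
      # example (hypothetical values), {"xen-pvm": {"kernel_path": "/vmlinuz"}}
      # replaces only that key and leaves the other parameters untouched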
      for hv_name, hv_dict in self.op.hvparams.items():
1266 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
1267 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
1268 779c15bb Iustin Pop
        else:
1269 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
1270 779c15bb Iustin Pop
1271 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1272 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
1273 779c15bb Iustin Pop
    else:
1274 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
1275 779c15bb Iustin Pop
1276 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1277 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
1278 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
1279 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1280 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
1281 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
1282 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
1283 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
1284 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
1285 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
1286 779c15bb Iustin Pop
1287 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1288 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1289 8084f9f6 Manuel Franceschini

1290 8084f9f6 Manuel Franceschini
    """
1291 779c15bb Iustin Pop
    if self.op.vg_name is not None:
1292 779c15bb Iustin Pop
      if self.op.vg_name != self.cfg.GetVGName():
1293 779c15bb Iustin Pop
        self.cfg.SetVGName(self.op.vg_name)
1294 779c15bb Iustin Pop
      else:
1295 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
1296 779c15bb Iustin Pop
                    " state, not changing")
1297 779c15bb Iustin Pop
    if self.op.hvparams:
1298 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
1299 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1300 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1301 779c15bb Iustin Pop
    if self.op.beparams:
1302 779c15bb Iustin Pop
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
1303 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1304 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1305 4b7735f9 Iustin Pop
1306 779c15bb Iustin Pop
    self.cfg.Update(self.cluster)
1307 8084f9f6 Manuel Franceschini
1308 4b7735f9 Iustin Pop
    # we want to update nodes after the cluster so that if any errors
1309 4b7735f9 Iustin Pop
    # happen, we have recorded and saved the cluster info
1310 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1311 4b7735f9 Iustin Pop
      node_info = self.cfg.GetAllNodesInfo().values()
1312 4b7735f9 Iustin Pop
      num_candidates = len([node for node in node_info
1313 4b7735f9 Iustin Pop
                            if node.master_candidate])
1314 4b7735f9 Iustin Pop
      num_nodes = len(node_info)
1315 4b7735f9 Iustin Pop
      if num_candidates < self.op.candidate_pool_size:
1316 4b7735f9 Iustin Pop
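        # too few candidates: promote randomly-chosen non-candidate nodes
        # until the requested pool size is reached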
        random.shuffle(node_info)
1317 4b7735f9 Iustin Pop
        for node in node_info:
1318 4b7735f9 Iustin Pop
          if num_candidates >= self.op.candidate_pool_size:
1319 4b7735f9 Iustin Pop
            break
1320 4b7735f9 Iustin Pop
          if node.master_candidate:
1321 4b7735f9 Iustin Pop
            continue
1322 4b7735f9 Iustin Pop
          node.master_candidate = True
1323 4b7735f9 Iustin Pop
          self.LogInfo("Promoting node %s to master candidate", node.name)
1324 4b7735f9 Iustin Pop
          self.cfg.Update(node)
1325 4b7735f9 Iustin Pop
          self.context.ReaddNode(node)
1326 4b7735f9 Iustin Pop
          num_candidates += 1
1327 4b7735f9 Iustin Pop
      elif num_candidates > self.op.candidate_pool_size:
1328 4b7735f9 Iustin Pop
        self.LogInfo("Note: more nodes are candidates (%d) than the new value"
1329 4b7735f9 Iustin Pop
                     " of candidate_pool_size (%d)" %
1330 4b7735f9 Iustin Pop
                     (num_candidates, self.op.candidate_pool_size))
1331 4b7735f9 Iustin Pop
1332 8084f9f6 Manuel Franceschini
1333 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1334 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1335 a8083063 Iustin Pop

1336 a8083063 Iustin Pop
  """
1337 a8083063 Iustin Pop
  if not instance.disks:
1338 a8083063 Iustin Pop
    return True
1339 a8083063 Iustin Pop
1340 a8083063 Iustin Pop
  if not oneshot:
1341 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1342 a8083063 Iustin Pop
1343 a8083063 Iustin Pop
  node = instance.primary_node
1344 a8083063 Iustin Pop
1345 a8083063 Iustin Pop
  for dev in instance.disks:
1346 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
1347 a8083063 Iustin Pop
1348 a8083063 Iustin Pop
  retries = 0
1349 a8083063 Iustin Pop
  while True:
1350 a8083063 Iustin Pop
    max_time = 0
1351 a8083063 Iustin Pop
    done = True
1352 a8083063 Iustin Pop
    cumul_degraded = False
1353 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1354 a8083063 Iustin Pop
    if not rstats:
1355 86d9d3bb Iustin Pop
      lu.LogWarning("Can't get any data from node %s", node)
1356 a8083063 Iustin Pop
      retries += 1
1357 a8083063 Iustin Pop
      if retries >= 10:
1358 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1359 3ecf6786 Iustin Pop
                                 " aborting." % node)
1360 a8083063 Iustin Pop
      time.sleep(6)
1361 a8083063 Iustin Pop
      continue
1362 a8083063 Iustin Pop
    retries = 0
1363 a8083063 Iustin Pop
    for i in range(len(rstats)):
1364 a8083063 Iustin Pop
      mstat = rstats[i]
1365 a8083063 Iustin Pop
      if mstat is None:
1366 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
1367 86d9d3bb Iustin Pop
                           node, instance.disks[i].iv_name)
1368 a8083063 Iustin Pop
        continue
1369 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1370 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1371 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1372 a8083063 Iustin Pop
      if perc_done is not None:
1373 a8083063 Iustin Pop
        done = False
1374 a8083063 Iustin Pop
        if est_time is not None:
1375 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1376 a8083063 Iustin Pop
          max_time = est_time
1377 a8083063 Iustin Pop
        else:
1378 a8083063 Iustin Pop
          rem_time = "no time estimate"
1379 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1380 b9bddb6b Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
1381 a8083063 Iustin Pop
    if done or oneshot:
1382 a8083063 Iustin Pop
      break
1383 a8083063 Iustin Pop
1384 d4fa5c23 Iustin Pop
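    # sleep for at most a minute, or less if the devices report a shorter
    # estimated completion time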
    time.sleep(min(60, max_time))
1385 a8083063 Iustin Pop
1386 a8083063 Iustin Pop
  if done:
1387 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1388 a8083063 Iustin Pop
  return not cumul_degraded
1389 a8083063 Iustin Pop
1390 a8083063 Iustin Pop
1391 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1392 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1393 a8083063 Iustin Pop

1394 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1395 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1396 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1397 0834c866 Iustin Pop

1398 a8083063 Iustin Pop
  """
1399 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1400 0834c866 Iustin Pop
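  # index into the status tuple returned by blockdev_find: position 5 is the
  # overall is_degraded flag, position 6 the local-disk (ldisk) status; this
  # is an assumption based on the docstring above and the usage below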
  if ldisk:
1401 0834c866 Iustin Pop
    idx = 6
1402 0834c866 Iustin Pop
  else:
1403 0834c866 Iustin Pop
    idx = 5
1404 a8083063 Iustin Pop
1405 a8083063 Iustin Pop
  result = True
1406 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1407 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1408 a8083063 Iustin Pop
    if not rstats:
1409 9a4f63d1 Iustin Pop
      logging.warning("Node %s: disk degraded, not found or node down", node)
1410 a8083063 Iustin Pop
      result = False
1411 a8083063 Iustin Pop
    else:
1412 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1413 a8083063 Iustin Pop
  if dev.children:
1414 a8083063 Iustin Pop
    for child in dev.children:
1415 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1416 a8083063 Iustin Pop
1417 a8083063 Iustin Pop
  return result
1418 a8083063 Iustin Pop
1419 a8083063 Iustin Pop
1420 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1421 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1422 a8083063 Iustin Pop

1423 a8083063 Iustin Pop
  """
1424 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1425 6bf01bbb Guido Trotter
  REQ_BGL = False
1426 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
1427 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")
1428 a8083063 Iustin Pop
1429 6bf01bbb Guido Trotter
  def ExpandNames(self):
1430 1f9430d6 Iustin Pop
    if self.op.names:
1431 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1432 1f9430d6 Iustin Pop
1433 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1434 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1435 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1436 1f9430d6 Iustin Pop
1437 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
1438 6bf01bbb Guido Trotter
    self.needed_locks = {}
1439 6bf01bbb Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1440 e310b019 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1441 6bf01bbb Guido Trotter
1442 6bf01bbb Guido Trotter
  def CheckPrereq(self):
1443 6bf01bbb Guido Trotter
    """Check prerequisites.
1444 6bf01bbb Guido Trotter

1445 6bf01bbb Guido Trotter
    """
1446 6bf01bbb Guido Trotter
1447 1f9430d6 Iustin Pop
  @staticmethod
1448 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1449 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1450 1f9430d6 Iustin Pop

1451 e4376078 Iustin Pop
    @param node_list: a list with the names of all nodes
1452 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
1453 1f9430d6 Iustin Pop

1454 e4376078 Iustin Pop
    @rtype: dict
1455 e4376078 Iustin Pop
    @return: a dictionary with osnames as keys and as value another map, with
1456 e4376078 Iustin Pop
        nodes as keys and list of OS objects as values, e.g.::
1457 e4376078 Iustin Pop

1458 e4376078 Iustin Pop
          {"debian-etch": {"node1": [<object>,...],
1459 e4376078 Iustin Pop
                           "node2": [<object>,]}
1460 e4376078 Iustin Pop
          }
1461 1f9430d6 Iustin Pop

1462 1f9430d6 Iustin Pop
    """
1463 1f9430d6 Iustin Pop
    all_os = {}
1464 1f9430d6 Iustin Pop
    for node_name, nr in rlist.iteritems():
1465 1f9430d6 Iustin Pop
      if not nr:
1466 1f9430d6 Iustin Pop
        continue
1467 b4de68a9 Iustin Pop
      for os_obj in nr:
1468 b4de68a9 Iustin Pop
        if os_obj.name not in all_os:
1469 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1470 1f9430d6 Iustin Pop
          # for each node in node_list
1471 b4de68a9 Iustin Pop
          all_os[os_obj.name] = {}
1472 1f9430d6 Iustin Pop
          for nname in node_list:
1473 b4de68a9 Iustin Pop
            all_os[os_obj.name][nname] = []
1474 b4de68a9 Iustin Pop
        all_os[os_obj.name][node_name].append(os_obj)
1475 1f9430d6 Iustin Pop
    return all_os
1476 a8083063 Iustin Pop
1477 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1478 a8083063 Iustin Pop
    """Compute the list of OSes.
1479 a8083063 Iustin Pop

1480 a8083063 Iustin Pop
    """
1481 6bf01bbb Guido Trotter
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1482 72737a7f Iustin Pop
    node_data = self.rpc.call_os_diagnose(node_list)
1483 a8083063 Iustin Pop
    if node_data == False:
1484 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1485 1f9430d6 Iustin Pop
    pol = self._DiagnoseByOS(node_list, node_data)
1486 1f9430d6 Iustin Pop
    output = []
1487 1f9430d6 Iustin Pop
    for os_name, os_data in pol.iteritems():
1488 1f9430d6 Iustin Pop
      row = []
1489 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1490 1f9430d6 Iustin Pop
        if field == "name":
1491 1f9430d6 Iustin Pop
          val = os_name
1492 1f9430d6 Iustin Pop
        elif field == "valid":
1493 1f9430d6 Iustin Pop
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1494 1f9430d6 Iustin Pop
        elif field == "node_status":
1495 1f9430d6 Iustin Pop
          val = {}
1496 1f9430d6 Iustin Pop
          for node_name, nos_list in os_data.iteritems():
1497 1f9430d6 Iustin Pop
            val[node_name] = [(v.status, v.path) for v in nos_list]
1498 1f9430d6 Iustin Pop
        else:
1499 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1500 1f9430d6 Iustin Pop
        row.append(val)
1501 1f9430d6 Iustin Pop
      output.append(row)
1502 1f9430d6 Iustin Pop
1503 1f9430d6 Iustin Pop
    return output
1504 a8083063 Iustin Pop
1505 a8083063 Iustin Pop
1506 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1507 a8083063 Iustin Pop
  """Logical unit for removing a node.
1508 a8083063 Iustin Pop

1509 a8083063 Iustin Pop
  """
1510 a8083063 Iustin Pop
  HPATH = "node-remove"
1511 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1512 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1513 a8083063 Iustin Pop
1514 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1515 a8083063 Iustin Pop
    """Build hooks env.
1516 a8083063 Iustin Pop

1517 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1518 d08869ee Guido Trotter
    node would then be impossible to remove.
1519 a8083063 Iustin Pop

1520 a8083063 Iustin Pop
    """
1521 396e1b78 Michael Hanselmann
    env = {
1522 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1523 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1524 396e1b78 Michael Hanselmann
      }
1525 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1526 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1527 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1528 a8083063 Iustin Pop
1529 a8083063 Iustin Pop
  def CheckPrereq(self):
1530 a8083063 Iustin Pop
    """Check prerequisites.
1531 a8083063 Iustin Pop

1532 a8083063 Iustin Pop
    This checks:
1533 a8083063 Iustin Pop
     - the node exists in the configuration
1534 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1535 a8083063 Iustin Pop
     - it's not the master
1536 a8083063 Iustin Pop

1537 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1538 a8083063 Iustin Pop

1539 a8083063 Iustin Pop
    """
1540 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1541 a8083063 Iustin Pop
    if node is None:
1542 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1543 a8083063 Iustin Pop
1544 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1545 a8083063 Iustin Pop
1546 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
1547 a8083063 Iustin Pop
    if node.name == masternode:
1548 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1549 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1550 a8083063 Iustin Pop
1551 a8083063 Iustin Pop
    for instance_name in instance_list:
1552 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1553 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1554 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1555 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1556 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1557 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1558 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1559 a8083063 Iustin Pop
    self.op.node_name = node.name
1560 a8083063 Iustin Pop
    self.node = node
1561 a8083063 Iustin Pop
1562 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1563 a8083063 Iustin Pop
    """Removes the node from the cluster.
1564 a8083063 Iustin Pop

1565 a8083063 Iustin Pop
    """
1566 a8083063 Iustin Pop
    node = self.node
1567 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
1568 9a4f63d1 Iustin Pop
                 node.name)
1569 a8083063 Iustin Pop
1570 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
1571 a8083063 Iustin Pop
1572 72737a7f Iustin Pop
    self.rpc.call_node_leave_cluster(node.name)
1573 c8a0948f Michael Hanselmann
1574 a8083063 Iustin Pop
1575 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1576 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1577 a8083063 Iustin Pop

1578 a8083063 Iustin Pop
  """
1579 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1580 35705d8f Guido Trotter
  REQ_BGL = False
1581 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
1582 31bf511f Iustin Pop
    "dtotal", "dfree",
1583 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
1584 31bf511f Iustin Pop
    "bootid",
1585 31bf511f Iustin Pop
    "ctotal",
1586 31bf511f Iustin Pop
    )
1587 31bf511f Iustin Pop
1588 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(
1589 31bf511f Iustin Pop
    "name", "pinst_cnt", "sinst_cnt",
1590 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
1591 31bf511f Iustin Pop
    "pip", "sip", "tags",
1592 31bf511f Iustin Pop
    "serial_no",
1593 0e67cdbe Iustin Pop
    "master_candidate",
1594 0e67cdbe Iustin Pop
    "master",
1595 31bf511f Iustin Pop
    )
1596 a8083063 Iustin Pop
1597 35705d8f Guido Trotter
  def ExpandNames(self):
1598 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1599 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1600 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1601 a8083063 Iustin Pop
1602 35705d8f Guido Trotter
    self.needed_locks = {}
1603 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1604 c8d8b4c8 Iustin Pop
1605 c8d8b4c8 Iustin Pop
    if self.op.names:
1606 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
1607 35705d8f Guido Trotter
    else:
1608 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
1609 c8d8b4c8 Iustin Pop
1610 31bf511f Iustin Pop
    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
1611 c8d8b4c8 Iustin Pop
    if self.do_locking:
1612 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
1613 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
1614 c8d8b4c8 Iustin Pop
1615 35705d8f Guido Trotter
1616 35705d8f Guido Trotter
  def CheckPrereq(self):
1617 35705d8f Guido Trotter
    """Check prerequisites.
1618 35705d8f Guido Trotter

1619 35705d8f Guido Trotter
    """
1620 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in the _GetWantedNodes,
1621 c8d8b4c8 Iustin Pop
    # if non empty, and if empty, there's no validation to do
1622 c8d8b4c8 Iustin Pop
    pass
1623 a8083063 Iustin Pop
1624 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1625 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1626 a8083063 Iustin Pop

1627 a8083063 Iustin Pop
    """
1628 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
1629 c8d8b4c8 Iustin Pop
    if self.do_locking:
1630 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
1631 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
1632 3fa93523 Guido Trotter
      nodenames = self.wanted
1633 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
1634 3fa93523 Guido Trotter
      if missing:
1635 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
1636 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
1637 c8d8b4c8 Iustin Pop
    else:
1638 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
1639 c1f1cbb2 Iustin Pop
1640 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
1641 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
1642 a8083063 Iustin Pop
1643 a8083063 Iustin Pop
    # begin data gathering
1644 a8083063 Iustin Pop
1645 31bf511f Iustin Pop
    if self.do_locking:
1646 a8083063 Iustin Pop
      live_data = {}
1647 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
1648 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
1649 a8083063 Iustin Pop
      for name in nodenames:
1650 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1651 a8083063 Iustin Pop
        if nodeinfo:
1652 d599d686 Iustin Pop
          fn = utils.TryConvert
1653 a8083063 Iustin Pop
          live_data[name] = {
1654 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
1655 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
1656 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
1657 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
1658 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
1659 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
1660 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
1661 a8083063 Iustin Pop
            }
1662 a8083063 Iustin Pop
        else:
1663 a8083063 Iustin Pop
          live_data[name] = {}
1664 a8083063 Iustin Pop
    else:
1665 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1666 a8083063 Iustin Pop
1667 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1668 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
1669 a8083063 Iustin Pop
1670 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1671 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1672 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1673 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1674 a8083063 Iustin Pop
1675 ec223efb Iustin Pop
      for instance_name in instancelist:
1676 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1677 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1678 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1679 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1680 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1681 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1682 a8083063 Iustin Pop
1683 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
1684 0e67cdbe Iustin Pop
1685 a8083063 Iustin Pop
    # end data gathering
1686 a8083063 Iustin Pop
1687 a8083063 Iustin Pop
    output = []
1688 a8083063 Iustin Pop
    for node in nodelist:
1689 a8083063 Iustin Pop
      node_output = []
1690 a8083063 Iustin Pop
      for field in self.op.output_fields:
1691 a8083063 Iustin Pop
        if field == "name":
1692 a8083063 Iustin Pop
          val = node.name
1693 ec223efb Iustin Pop
        elif field == "pinst_list":
1694 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1695 ec223efb Iustin Pop
        elif field == "sinst_list":
1696 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1697 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1698 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1699 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1700 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1701 a8083063 Iustin Pop
        elif field == "pip":
1702 a8083063 Iustin Pop
          val = node.primary_ip
1703 a8083063 Iustin Pop
        elif field == "sip":
1704 a8083063 Iustin Pop
          val = node.secondary_ip
1705 130a6a6f Iustin Pop
        elif field == "tags":
1706 130a6a6f Iustin Pop
          val = list(node.GetTags())
1707 38d7239a Iustin Pop
        elif field == "serial_no":
1708 38d7239a Iustin Pop
          val = node.serial_no
1709 0e67cdbe Iustin Pop
        elif field == "master_candidate":
1710 0e67cdbe Iustin Pop
          val = node.master_candidate
1711 0e67cdbe Iustin Pop
        elif field == "master":
1712 0e67cdbe Iustin Pop
          val = node.name == master_node
1713 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
1714 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1715 a8083063 Iustin Pop
        else:
1716 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1717 a8083063 Iustin Pop
        node_output.append(val)
1718 a8083063 Iustin Pop
      output.append(node_output)
1719 a8083063 Iustin Pop
1720 a8083063 Iustin Pop
    return output
1721 a8083063 Iustin Pop
1722 a8083063 Iustin Pop
1723 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1724 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1725 dcb93971 Michael Hanselmann

1726 dcb93971 Michael Hanselmann
  """
1727 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1728 21a15682 Guido Trotter
  REQ_BGL = False
1729 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
1730 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
1731 21a15682 Guido Trotter
1732 21a15682 Guido Trotter
  def ExpandNames(self):
1733 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1734 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1735 21a15682 Guido Trotter
                       selected=self.op.output_fields)
1736 21a15682 Guido Trotter
1737 21a15682 Guido Trotter
    self.needed_locks = {}
1738 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1739 21a15682 Guido Trotter
    if not self.op.nodes:
1740 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1741 21a15682 Guido Trotter
    else:
1742 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
1743 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
1744 dcb93971 Michael Hanselmann
1745 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1746 dcb93971 Michael Hanselmann
    """Check prerequisites.
1747 dcb93971 Michael Hanselmann

1748 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1749 dcb93971 Michael Hanselmann

1750 dcb93971 Michael Hanselmann
    """
1751 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
1752 dcb93971 Michael Hanselmann
1753 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1754 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1755 dcb93971 Michael Hanselmann

1756 dcb93971 Michael Hanselmann
    """
1757 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1758 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
1759 dcb93971 Michael Hanselmann
1760 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1761 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1762 dcb93971 Michael Hanselmann
1763 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1764 dcb93971 Michael Hanselmann
1765 dcb93971 Michael Hanselmann
    output = []
1766 dcb93971 Michael Hanselmann
    for node in nodenames:
1767 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1768 37d19eb2 Michael Hanselmann
        continue
1769 37d19eb2 Michael Hanselmann
1770 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1771 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1772 dcb93971 Michael Hanselmann
1773 dcb93971 Michael Hanselmann
      for vol in node_vols:
1774 dcb93971 Michael Hanselmann
        node_output = []
1775 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1776 dcb93971 Michael Hanselmann
          if field == "node":
1777 dcb93971 Michael Hanselmann
            val = node
1778 dcb93971 Michael Hanselmann
          elif field == "phys":
1779 dcb93971 Michael Hanselmann
            val = vol['dev']
1780 dcb93971 Michael Hanselmann
          elif field == "vg":
1781 dcb93971 Michael Hanselmann
            val = vol['vg']
1782 dcb93971 Michael Hanselmann
          elif field == "name":
1783 dcb93971 Michael Hanselmann
            val = vol['name']
1784 dcb93971 Michael Hanselmann
          elif field == "size":
1785 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1786 dcb93971 Michael Hanselmann
          elif field == "instance":
1787 dcb93971 Michael Hanselmann
            for inst in ilist:
1788 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1789 dcb93971 Michael Hanselmann
                continue
1790 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1791 dcb93971 Michael Hanselmann
                val = inst.name
1792 dcb93971 Michael Hanselmann
                break
1793 dcb93971 Michael Hanselmann
            else:
1794 dcb93971 Michael Hanselmann
              val = '-'
1795 dcb93971 Michael Hanselmann
          else:
1796 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1797 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1798 dcb93971 Michael Hanselmann
1799 dcb93971 Michael Hanselmann
        output.append(node_output)
1800 dcb93971 Michael Hanselmann
1801 dcb93971 Michael Hanselmann
    return output
1802 dcb93971 Michael Hanselmann
1803 dcb93971 Michael Hanselmann
1804 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1805 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1806 a8083063 Iustin Pop

1807 a8083063 Iustin Pop
  """
1808 a8083063 Iustin Pop
  HPATH = "node-add"
1809 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1810 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1811 a8083063 Iustin Pop
1812 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1813 a8083063 Iustin Pop
    """Build hooks env.
1814 a8083063 Iustin Pop

1815 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1816 a8083063 Iustin Pop

1817 a8083063 Iustin Pop
    """
1818 a8083063 Iustin Pop
    env = {
1819 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1820 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1821 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1822 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1823 a8083063 Iustin Pop
      }
1824 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1825 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1826 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1827 a8083063 Iustin Pop
1828 a8083063 Iustin Pop
  def CheckPrereq(self):
1829 a8083063 Iustin Pop
    """Check prerequisites.
1830 a8083063 Iustin Pop

1831 a8083063 Iustin Pop
    This checks:
1832 a8083063 Iustin Pop
     - the new node is not already in the config
1833 a8083063 Iustin Pop
     - it is resolvable
1834 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
1835 a8083063 Iustin Pop

1836 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1837 a8083063 Iustin Pop

1838 a8083063 Iustin Pop
    """
1839 a8083063 Iustin Pop
    node_name = self.op.node_name
1840 a8083063 Iustin Pop
    cfg = self.cfg
1841 a8083063 Iustin Pop
1842 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1843 a8083063 Iustin Pop
1844 bcf043c9 Iustin Pop
    node = dns_data.name
1845 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1846 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1847 a8083063 Iustin Pop
    if secondary_ip is None:
1848 a8083063 Iustin Pop
      secondary_ip = primary_ip
1849 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1850 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1851 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1852 e7c6e02b Michael Hanselmann
1853 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1854 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
1855 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1856 e7c6e02b Michael Hanselmann
                                 node)
1857 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
1858 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1859 a8083063 Iustin Pop
1860 a8083063 Iustin Pop
    for existing_node_name in node_list:
1861 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1862 e7c6e02b Michael Hanselmann
1863 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
1864 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
1865 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
1866 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1867 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
1868 e7c6e02b Michael Hanselmann
        continue
1869 e7c6e02b Michael Hanselmann
1870 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1871 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1872 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1873 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1874 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1875 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1876 a8083063 Iustin Pop
1877 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1878 a8083063 Iustin Pop
    # same as for the master
1879 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
1880 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1881 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1882 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1883 a8083063 Iustin Pop
      if master_singlehomed:
1884 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1885 3ecf6786 Iustin Pop
                                   " new node has one")
1886 a8083063 Iustin Pop
      else:
1887 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1888 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1889 a8083063 Iustin Pop
1890 a8083063 Iustin Pop
    # check reachability
1891 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1892 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1893 a8083063 Iustin Pop
1894 a8083063 Iustin Pop
    if not newbie_singlehomed:
1895 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1896 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1897 b15d625f Iustin Pop
                           source=myself.secondary_ip):
1898 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1899 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
1900 a8083063 Iustin Pop
1901 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1902 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1903 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1904 a8083063 Iustin Pop
1905 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1906 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1907 a8083063 Iustin Pop

1908 a8083063 Iustin Pop
    """
1909 a8083063 Iustin Pop
    new_node = self.new_node
1910 a8083063 Iustin Pop
    node = new_node.name
1911 a8083063 Iustin Pop
1912 a8083063 Iustin Pop
    # check connectivity
1913 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
1914 a8083063 Iustin Pop
    if result:
1915 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1916 9a4f63d1 Iustin Pop
        logging.info("Communication to node %s fine, sw version %s match",
1917 9a4f63d1 Iustin Pop
                     node, result)
1918 a8083063 Iustin Pop
      else:
1919 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1920 3ecf6786 Iustin Pop
                                 " node version %s" %
1921 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1922 a8083063 Iustin Pop
    else:
1923 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1924 a8083063 Iustin Pop
1925 a8083063 Iustin Pop
    # setup ssh on node
1926 9a4f63d1 Iustin Pop
    logging.info("Copy ssh key to node %s", node)
1927 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1928 a8083063 Iustin Pop
    keyarray = []
1929 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1930 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1931 70d9e3d8 Iustin Pop
                priv_key, pub_key]
1932 a8083063 Iustin Pop
1933 a8083063 Iustin Pop
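    # read the contents of each key file so they can be pushed to the new node
    # via the node_add RPC call below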
    for i in keyfiles:
1934 a8083063 Iustin Pop
      f = open(i, 'r')
1935 a8083063 Iustin Pop
      try:
1936 a8083063 Iustin Pop
        keyarray.append(f.read())
1937 a8083063 Iustin Pop
      finally:
1938 a8083063 Iustin Pop
        f.close()
1939 a8083063 Iustin Pop
1940 72737a7f Iustin Pop
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
1941 72737a7f Iustin Pop
                                    keyarray[2],
1942 72737a7f Iustin Pop
                                    keyarray[3], keyarray[4], keyarray[5])
1943 a8083063 Iustin Pop
1944 a8083063 Iustin Pop
    if not result:
1945 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1946 a8083063 Iustin Pop
1947 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1948 d9c02ca6 Michael Hanselmann
    utils.AddHostToEtcHosts(new_node.name)
1949 c8a0948f Michael Hanselmann
1950 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1951 caad16e2 Iustin Pop
      if not self.rpc.call_node_has_ip_address(new_node.name,
1952 caad16e2 Iustin Pop
                                               new_node.secondary_ip):
1953 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1954 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
1955 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
1956 a8083063 Iustin Pop
1957 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
1958 5c0527ed Guido Trotter
    node_verify_param = {
1959 5c0527ed Guido Trotter
      'nodelist': [node],
1960 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
1961 5c0527ed Guido Trotter
    }
1962 5c0527ed Guido Trotter
1963 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
1964 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
1965 5c0527ed Guido Trotter
    for verifier in node_verify_list:
1966 5c0527ed Guido Trotter
      if not result[verifier]:
1967 5c0527ed Guido Trotter
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
1968 5c0527ed Guido Trotter
                                 " for remote verification" % verifier)
1969 5c0527ed Guido Trotter
      if result[verifier]['nodelist']:
1970 5c0527ed Guido Trotter
        for failed in result[verifier]['nodelist']:
1971 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
1972 5c0527ed Guido Trotter
                      (verifier, result[verifier]['nodelist'][failed]))
1973 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
1974 ff98055b Iustin Pop
1975 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1976 a8083063 Iustin Pop
    # including the node just added
1977 d6a02168 Michael Hanselmann
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
1978 102b115b Michael Hanselmann
    dist_nodes = self.cfg.GetNodeList()
1979 102b115b Michael Hanselmann
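    # on readd the node is already in the configuration's node list, so don't
    # append it a second time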
    if not self.op.readd:
1980 102b115b Michael Hanselmann
      dist_nodes.append(node)
1981 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1982 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1983 a8083063 Iustin Pop
1984 9a4f63d1 Iustin Pop
    logging.debug("Copying hosts and known_hosts to all nodes")
1985 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1986 72737a7f Iustin Pop
      result = self.rpc.call_upload_file(dist_nodes, fname)
1987 a8083063 Iustin Pop
      for to_node in dist_nodes:
1988 a8083063 Iustin Pop
        if not result[to_node]:
1989 9a4f63d1 Iustin Pop
          logging.error("Copy of file %s to node %s failed", fname, to_node)
1990 a8083063 Iustin Pop
1991 d6a02168 Michael Hanselmann
    to_copy = []
1992 00cd937c Iustin Pop
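    # the VNC password file only needs to be copied if Xen HVM is among the
    # enabled hypervisors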
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
1993 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
1994 a8083063 Iustin Pop
    for fname in to_copy:
1995 72737a7f Iustin Pop
      result = self.rpc.call_upload_file([node], fname)
1996 b5602d15 Guido Trotter
      if not result[node]:
1997 9a4f63d1 Iustin Pop
        logging.error("Could not copy file %s to node %s", fname, node)
1998 a8083063 Iustin Pop
1999 d8470559 Michael Hanselmann
    if self.op.readd:
2000 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
2001 d8470559 Michael Hanselmann
    else:
2002 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
2003 a8083063 Iustin Pop
2004 a8083063 Iustin Pop
2005 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
2006 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
2007 b31c8676 Iustin Pop

2008 b31c8676 Iustin Pop
  """
2009 b31c8676 Iustin Pop
  HPATH = "node-modify"
2010 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2011 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
2012 b31c8676 Iustin Pop
  REQ_BGL = False
2013 b31c8676 Iustin Pop
2014 b31c8676 Iustin Pop
  def CheckArguments(self):
2015 b31c8676 Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2016 b31c8676 Iustin Pop
    if node_name is None:
2017 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2018 b31c8676 Iustin Pop
    self.op.node_name = node_name
2019 b31c8676 Iustin Pop
    if not hasattr(self.op, 'master_candidate'):
2020 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification")
2021 b31c8676 Iustin Pop
    self.op.master_candidate = bool(self.op.master_candidate)
2022 b31c8676 Iustin Pop
2023 b31c8676 Iustin Pop
  def ExpandNames(self):
2024 b31c8676 Iustin Pop
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
2025 b31c8676 Iustin Pop
2026 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
2027 b31c8676 Iustin Pop
    """Build hooks env.
2028 b31c8676 Iustin Pop

2029 b31c8676 Iustin Pop
    This runs on the master node.
2030 b31c8676 Iustin Pop

2031 b31c8676 Iustin Pop
    """
2032 b31c8676 Iustin Pop
    env = {
2033 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
2034 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
2035 b31c8676 Iustin Pop
      }
2036 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
2037 b31c8676 Iustin Pop
          self.op.node_name]
2038 b31c8676 Iustin Pop
    return env, nl, nl
2039 b31c8676 Iustin Pop
2040 b31c8676 Iustin Pop
  def CheckPrereq(self):
2041 b31c8676 Iustin Pop
    """Check prerequisites.
2042 b31c8676 Iustin Pop

2043 b31c8676 Iustin Pop
    This only reads the force parameter; checks happen in CheckArguments.
2044 b31c8676 Iustin Pop

2045 b31c8676 Iustin Pop
    """
2046 b31c8676 Iustin Pop
    force = self.force = self.op.force
2047 b31c8676 Iustin Pop
2048 b31c8676 Iustin Pop
    return
2049 b31c8676 Iustin Pop
2050 b31c8676 Iustin Pop
  def Exec(self, feedback_fn):
2051 b31c8676 Iustin Pop
    """Modifies a node.
2052 b31c8676 Iustin Pop

2053 b31c8676 Iustin Pop
    """
2054 b31c8676 Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.node_name)
2055 b31c8676 Iustin Pop
2056 b31c8676 Iustin Pop
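    # collect (parameter, new value) pairs describing the changes made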
    result = []
2057 b31c8676 Iustin Pop
2058 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
2059 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
2060 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
2061 b31c8676 Iustin Pop
2062 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
2063 b31c8676 Iustin Pop
    self.cfg.Update(node)
2064 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
2065 b31c8676 Iustin Pop
    self.context.ReaddNode(node)
2066 b31c8676 Iustin Pop
2067 b31c8676 Iustin Pop
    return result
2068 b31c8676 Iustin Pop
2069 b31c8676 Iustin Pop
2070 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
2071 a8083063 Iustin Pop
  """Query cluster configuration.
2072 a8083063 Iustin Pop

2073 a8083063 Iustin Pop
  """
2074 a8083063 Iustin Pop
  _OP_REQP = []
2075 642339cf Guido Trotter
  REQ_BGL = False
2076 642339cf Guido Trotter
2077 642339cf Guido Trotter
  def ExpandNames(self):
2078 642339cf Guido Trotter
    self.needed_locks = {}
2079 a8083063 Iustin Pop
2080 a8083063 Iustin Pop
  def CheckPrereq(self):
2081 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
2082 a8083063 Iustin Pop

2083 a8083063 Iustin Pop
    """
2084 a8083063 Iustin Pop
    pass
2085 a8083063 Iustin Pop
2086 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2087 a8083063 Iustin Pop
    """Return cluster config.
2088 a8083063 Iustin Pop

2089 a8083063 Iustin Pop
    """
2090 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
2091 a8083063 Iustin Pop
    result = {
2092 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
2093 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
2094 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
2095 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
2096 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
2097 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
2098 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
2099 469f88e1 Iustin Pop
      "master": cluster.master_node,
2100 02691904 Alexander Schreiber
      "default_hypervisor": cluster.default_hypervisor,
2101 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
2102 469f88e1 Iustin Pop
      "hvparams": cluster.hvparams,
2103 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
2104 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
2105 a8083063 Iustin Pop
      }
2106 a8083063 Iustin Pop
2107 a8083063 Iustin Pop
    return result
2108 a8083063 Iustin Pop
2109 a8083063 Iustin Pop
2110 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
2111 ae5849b5 Michael Hanselmann
  """Return configuration values.
2112 a8083063 Iustin Pop

2113 a8083063 Iustin Pop
  """
2114 a8083063 Iustin Pop
  _OP_REQP = []
2115 642339cf Guido Trotter
  REQ_BGL = False
2116 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
2117 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")
2118 642339cf Guido Trotter
2119 642339cf Guido Trotter
  def ExpandNames(self):
2120 642339cf Guido Trotter
    self.needed_locks = {}
2121 a8083063 Iustin Pop
2122 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2123 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2124 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
2125 ae5849b5 Michael Hanselmann
2126 a8083063 Iustin Pop
  def CheckPrereq(self):
2127 a8083063 Iustin Pop
    """No prerequisites.
2128 a8083063 Iustin Pop

2129 a8083063 Iustin Pop
    """
2130 a8083063 Iustin Pop
    pass
2131 a8083063 Iustin Pop
2132 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2133 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
2134 a8083063 Iustin Pop

2135 a8083063 Iustin Pop
    """
2136 ae5849b5 Michael Hanselmann
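    # illustrative example (values made up): output_fields == ["cluster_name",
    # "drain_flag"] would return something like ["cluster.example.com", False]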
    values = []
2137 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
2138 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
2139 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
2140 ae5849b5 Michael Hanselmann
      elif field == "master_node":
2141 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
2142 3ccafd0e Iustin Pop
      elif field == "drain_flag":
2143 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2144 ae5849b5 Michael Hanselmann
      else:
2145 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
2146 3ccafd0e Iustin Pop
      values.append(entry)
2147 ae5849b5 Michael Hanselmann
    return values
2148 a8083063 Iustin Pop
2149 a8083063 Iustin Pop
2150 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
2151 a8083063 Iustin Pop
  """Bring up an instance's disks.
2152 a8083063 Iustin Pop

2153 a8083063 Iustin Pop
  """
2154 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2155 f22a8ba3 Guido Trotter
  REQ_BGL = False
2156 f22a8ba3 Guido Trotter
2157 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2158 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2159 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2160 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2161 f22a8ba3 Guido Trotter
2162 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2163 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2164 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2165 a8083063 Iustin Pop
2166 a8083063 Iustin Pop
  def CheckPrereq(self):
2167 a8083063 Iustin Pop
    """Check prerequisites.
2168 a8083063 Iustin Pop

2169 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2170 a8083063 Iustin Pop

2171 a8083063 Iustin Pop
    """
2172 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2173 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2174 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2175 a8083063 Iustin Pop
2176 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2177 a8083063 Iustin Pop
    """Activate the disks.
2178 a8083063 Iustin Pop

2179 a8083063 Iustin Pop
    """
2180 b9bddb6b Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2181 a8083063 Iustin Pop
    if not disks_ok:
2182 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
2183 a8083063 Iustin Pop
2184 a8083063 Iustin Pop
    return disks_info
2185 a8083063 Iustin Pop
2186 a8083063 Iustin Pop
2187 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2188 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
2189 a8083063 Iustin Pop

2190 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
2191 a8083063 Iustin Pop

2192 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
2193 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
2194 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
2195 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
2196 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
2197 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
2198 e4376078 Iustin Pop
      won't result in an error return from the function
2199 e4376078 Iustin Pop
  @return: False if the operation failed, otherwise a list of
2200 e4376078 Iustin Pop
      (host, instance_visible_name, node_visible_name)
2201 e4376078 Iustin Pop
      with the mapping from node devices to instance devices
2202 a8083063 Iustin Pop

2203 a8083063 Iustin Pop
  """
2204 a8083063 Iustin Pop
  device_info = []
2205 a8083063 Iustin Pop
  disks_ok = True
2206 fdbd668d Iustin Pop
  iname = instance.name
2207 fdbd668d Iustin Pop
  # With the two-pass mechanism we try to reduce the window of
2208 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
2209 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
2210 fdbd668d Iustin Pop
2211 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
2212 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
2213 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
2214 fdbd668d Iustin Pop
  # SyncSource, etc.)
2215 fdbd668d Iustin Pop
2216 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
2217 a8083063 Iustin Pop
  for inst_disk in instance.disks:
2218 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2219 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2220 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2221 a8083063 Iustin Pop
      if not result:
2222 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2223 86d9d3bb Iustin Pop
                           " (is_primary=False, pass=1)",
2224 86d9d3bb Iustin Pop
                           inst_disk.iv_name, node)
2225 fdbd668d Iustin Pop
        if not ignore_secondaries:
2226 a8083063 Iustin Pop
          disks_ok = False
2227 fdbd668d Iustin Pop
2228 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
2229 fdbd668d Iustin Pop
2230 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
2231 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
2232 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2233 fdbd668d Iustin Pop
      if node != instance.primary_node:
2234 fdbd668d Iustin Pop
        continue
2235 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2236 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2237 fdbd668d Iustin Pop
      if not result:
2238 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2239 86d9d3bb Iustin Pop
                           " (is_primary=True, pass=2)",
2240 86d9d3bb Iustin Pop
                           inst_disk.iv_name, node)
2241 fdbd668d Iustin Pop
        disks_ok = False
2242 fdbd668d Iustin Pop
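    # record the primary node's assemble result for this disk, giving the
    # node device to instance device mapping described in the docstring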
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
2243 a8083063 Iustin Pop
2244 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
2245 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
2246 b352ab5b Iustin Pop
  # improving the logical/physical id handling
2247 b352ab5b Iustin Pop
  for disk in instance.disks:
2248 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
2249 b352ab5b Iustin Pop
2250 a8083063 Iustin Pop
  return disks_ok, device_info
2251 a8083063 Iustin Pop
2252 a8083063 Iustin Pop
2253 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
2254 3ecf6786 Iustin Pop
  """Start the disks of an instance.
2255 3ecf6786 Iustin Pop

2256 3ecf6786 Iustin Pop
  """
2257 b9bddb6b Iustin Pop
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
2258 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
2259 fe7b0351 Michael Hanselmann
  if not disks_ok:
2260 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
2261 fe7b0351 Michael Hanselmann
    if force is not None and not force:
2262 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
2263 86d9d3bb Iustin Pop
                         " secondary node,"
2264 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
2265 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
2266 fe7b0351 Michael Hanselmann
2267 fe7b0351 Michael Hanselmann
2268 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
2269 a8083063 Iustin Pop
  """Shutdown an instance's disks.
2270 a8083063 Iustin Pop

2271 a8083063 Iustin Pop
  """
2272 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2273 f22a8ba3 Guido Trotter
  REQ_BGL = False
2274 f22a8ba3 Guido Trotter
2275 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2276 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2277 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2278 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2279 f22a8ba3 Guido Trotter
2280 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2281 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2282 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2283 a8083063 Iustin Pop
2284 a8083063 Iustin Pop
  def CheckPrereq(self):
2285 a8083063 Iustin Pop
    """Check prerequisites.
2286 a8083063 Iustin Pop

2287 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2288 a8083063 Iustin Pop

2289 a8083063 Iustin Pop
    """
2290 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2291 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2292 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2293 a8083063 Iustin Pop
2294 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2295 a8083063 Iustin Pop
    """Deactivate the disks
2296 a8083063 Iustin Pop

2297 a8083063 Iustin Pop
    """
2298 a8083063 Iustin Pop
    instance = self.instance
2299 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
2300 a8083063 Iustin Pop
2301 a8083063 Iustin Pop
2302 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
2303 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
2304 155d6c75 Guido Trotter

2305 155d6c75 Guido Trotter
  This function checks if an instance is running, before calling
2306 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
2307 155d6c75 Guido Trotter

2308 155d6c75 Guido Trotter
  """
2309 72737a7f Iustin Pop
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
2310 72737a7f Iustin Pop
                                      [instance.hypervisor])
2311 155d6c75 Guido Trotter
  ins_l = ins_l[instance.primary_node]
2312 155d6c75 Guido Trotter
  if not type(ins_l) is list:
2313 155d6c75 Guido Trotter
    raise errors.OpExecError("Can't contact node '%s'" %
2314 155d6c75 Guido Trotter
                             instance.primary_node)
2315 155d6c75 Guido Trotter
2316 155d6c75 Guido Trotter
  if instance.name in ins_l:
2317 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
2318 155d6c75 Guido Trotter
                             " block devices.")
2319 155d6c75 Guido Trotter
2320 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
2321 a8083063 Iustin Pop
2322 a8083063 Iustin Pop
2323 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2324 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2325 a8083063 Iustin Pop

2326 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2327 a8083063 Iustin Pop

2328 a8083063 Iustin Pop
  If ignore_primary is false, errors on the primary node are not
2329 a8083063 Iustin Pop
  ignored.
2330 a8083063 Iustin Pop

2331 a8083063 Iustin Pop
  """
2332 a8083063 Iustin Pop
  result = True
2333 a8083063 Iustin Pop
  for disk in instance.disks:
2334 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2335 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
2336 72737a7f Iustin Pop
      if not lu.rpc.call_blockdev_shutdown(node, top_disk):
2337 9a4f63d1 Iustin Pop
        logging.error("Could not shutdown block device %s on node %s",
2338 9a4f63d1 Iustin Pop
                      disk.iv_name, node)
2339 a8083063 Iustin Pop
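        # failures on non-primary nodes always count; a failure on the primary
        # node is only tolerated when ignore_primary is set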
        if not ignore_primary or node != instance.primary_node:
2340 a8083063 Iustin Pop
          result = False
2341 a8083063 Iustin Pop
  return result
2342 a8083063 Iustin Pop
2343 a8083063 Iustin Pop
2344 b9bddb6b Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
2345 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2346 d4f16fd9 Iustin Pop

2347 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
2348 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2349 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
2350 d4f16fd9 Iustin Pop
  exception.
2351 d4f16fd9 Iustin Pop

2352 b9bddb6b Iustin Pop
  @type lu: L{LogicalUnit}
2353 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2354 e69d05fd Iustin Pop
  @type node: C{str}
2355 e69d05fd Iustin Pop
  @param node: the node to check
2356 e69d05fd Iustin Pop
  @type reason: C{str}
2357 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2358 e69d05fd Iustin Pop
  @type requested: C{int}
2359 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2360 e69d05fd Iustin Pop
  @type hypervisor: C{str}
2361 e69d05fd Iustin Pop
  @param hypervisor: the hypervisor to ask for memory stats
2362 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2363 e69d05fd Iustin Pop
      we cannot check the node
2364 d4f16fd9 Iustin Pop

2365 d4f16fd9 Iustin Pop
  """
2366 72737a7f Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
2367 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2368 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2369 d4f16fd9 Iustin Pop
                             " information" % (node,))
2370 d4f16fd9 Iustin Pop
2371 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2372 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2373 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2374 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2375 d4f16fd9 Iustin Pop
  if requested > free_mem:
2376 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2377 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2378 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2379 d4f16fd9 Iustin Pop
2380 d4f16fd9 Iustin Pop
2381 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2382 a8083063 Iustin Pop
  """Starts an instance.
2383 a8083063 Iustin Pop

2384 a8083063 Iustin Pop
  """
2385 a8083063 Iustin Pop
  HPATH = "instance-start"
2386 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2387 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2388 e873317a Guido Trotter
  REQ_BGL = False
2389 e873317a Guido Trotter
2390 e873317a Guido Trotter
  def ExpandNames(self):
2391 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2392 a8083063 Iustin Pop
2393 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2394 a8083063 Iustin Pop
    """Build hooks env.
2395 a8083063 Iustin Pop

2396 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2397 a8083063 Iustin Pop

2398 a8083063 Iustin Pop
    """
2399 a8083063 Iustin Pop
    env = {
2400 a8083063 Iustin Pop
      "FORCE": self.op.force,
2401 a8083063 Iustin Pop
      }
2402 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2403 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2404 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2405 a8083063 Iustin Pop
    return env, nl, nl
2406 a8083063 Iustin Pop
2407 a8083063 Iustin Pop
  def CheckPrereq(self):
2408 a8083063 Iustin Pop
    """Check prerequisites.
2409 a8083063 Iustin Pop

2410 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2411 a8083063 Iustin Pop

2412 a8083063 Iustin Pop
    """
2413 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2414 e873317a Guido Trotter
    assert self.instance is not None, \
2415 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2416 a8083063 Iustin Pop
2417 338e51e8 Iustin Pop
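    # backend parameters for the instance, with cluster defaults filled in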
    bep = self.cfg.GetClusterInfo().FillBE(instance)
2418 a8083063 Iustin Pop
    # check bridge existence
2419 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
2420 a8083063 Iustin Pop
2421 b9bddb6b Iustin Pop
    _CheckNodeFreeMemory(self, instance.primary_node,
2422 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
2423 338e51e8 Iustin Pop
                         bep[constants.BE_MEMORY], instance.hypervisor)
2424 d4f16fd9 Iustin Pop
2425 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2426 a8083063 Iustin Pop
    """Start the instance.
2427 a8083063 Iustin Pop

2428 a8083063 Iustin Pop
    """
2429 a8083063 Iustin Pop
    instance = self.instance
2430 a8083063 Iustin Pop
    force = self.op.force
2431 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
2432 a8083063 Iustin Pop
2433 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
2434 fe482621 Iustin Pop
2435 a8083063 Iustin Pop
    node_current = instance.primary_node
2436 a8083063 Iustin Pop
2437 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
2438 a8083063 Iustin Pop
2439 72737a7f Iustin Pop
    if not self.rpc.call_instance_start(node_current, instance, extra_args):
2440 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
2441 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2442 a8083063 Iustin Pop
2443 a8083063 Iustin Pop
2444 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2445 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2446 bf6929a2 Alexander Schreiber

2447 bf6929a2 Alexander Schreiber
  """
2448 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2449 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2450 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2451 e873317a Guido Trotter
  REQ_BGL = False
2452 e873317a Guido Trotter
2453 e873317a Guido Trotter
  def ExpandNames(self):
2454 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2455 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2456 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
2457 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2458 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
2459 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2460 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
2461 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2462 bf6929a2 Alexander Schreiber
2463 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2464 bf6929a2 Alexander Schreiber
    """Build hooks env.
2465 bf6929a2 Alexander Schreiber

2466 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2467 bf6929a2 Alexander Schreiber

2468 bf6929a2 Alexander Schreiber
    """
2469 bf6929a2 Alexander Schreiber
    env = {
2470 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2471 bf6929a2 Alexander Schreiber
      }
2472 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2473 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2474 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2475 bf6929a2 Alexander Schreiber
    return env, nl, nl
2476 bf6929a2 Alexander Schreiber
2477 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2478 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2479 bf6929a2 Alexander Schreiber

2480 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2481 bf6929a2 Alexander Schreiber

2482 bf6929a2 Alexander Schreiber
    """
2483 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2484 e873317a Guido Trotter
    assert self.instance is not None, \
2485 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2486 bf6929a2 Alexander Schreiber
2487 bf6929a2 Alexander Schreiber
    # check bridge existence
2488 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
2489 bf6929a2 Alexander Schreiber
2490 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2491 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2492 bf6929a2 Alexander Schreiber

2493 bf6929a2 Alexander Schreiber
    """
2494 bf6929a2 Alexander Schreiber
    instance = self.instance
2495 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2496 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2497 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2498 bf6929a2 Alexander Schreiber
2499 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
2500 bf6929a2 Alexander Schreiber
2501 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2502 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2503 72737a7f Iustin Pop
      if not self.rpc.call_instance_reboot(node_current, instance,
2504 72737a7f Iustin Pop
                                           reboot_type, extra_args):
2505 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2506 bf6929a2 Alexander Schreiber
    else:
2507 72737a7f Iustin Pop
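      # full reboot: shut the instance down, cycle its disks and start it again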
      if not self.rpc.call_instance_shutdown(node_current, instance):
2508 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2509 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
2510 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
2511 72737a7f Iustin Pop
      if not self.rpc.call_instance_start(node_current, instance, extra_args):
2512 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
2513 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2514 bf6929a2 Alexander Schreiber
2515 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
2516 bf6929a2 Alexander Schreiber
2517 bf6929a2 Alexander Schreiber
2518 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2519 a8083063 Iustin Pop
  """Shutdown an instance.
2520 a8083063 Iustin Pop

2521 a8083063 Iustin Pop
  """
2522 a8083063 Iustin Pop
  HPATH = "instance-stop"
2523 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2524 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2525 e873317a Guido Trotter
  REQ_BGL = False
2526 e873317a Guido Trotter
2527 e873317a Guido Trotter
  def ExpandNames(self):
2528 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2529 a8083063 Iustin Pop
2530 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2531 a8083063 Iustin Pop
    """Build hooks env.
2532 a8083063 Iustin Pop

2533 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2534 a8083063 Iustin Pop

2535 a8083063 Iustin Pop
    """
2536 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2537 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2538 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2539 a8083063 Iustin Pop
    return env, nl, nl
2540 a8083063 Iustin Pop
2541 a8083063 Iustin Pop
  def CheckPrereq(self):
2542 a8083063 Iustin Pop
    """Check prerequisites.
2543 a8083063 Iustin Pop

2544 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2545 a8083063 Iustin Pop

2546 a8083063 Iustin Pop
    """
2547 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2548 e873317a Guido Trotter
    assert self.instance is not None, \
2549 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2550 a8083063 Iustin Pop
2551 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2552 a8083063 Iustin Pop
    """Shutdown the instance.
2553 a8083063 Iustin Pop

2554 a8083063 Iustin Pop
    """
2555 a8083063 Iustin Pop
    instance = self.instance
2556 a8083063 Iustin Pop
    node_current = instance.primary_node
2557 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2558 72737a7f Iustin Pop
    if not self.rpc.call_instance_shutdown(node_current, instance):
2559 86d9d3bb Iustin Pop
      self.proc.LogWarning("Could not shutdown instance")
2560 a8083063 Iustin Pop
2561 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
2562 a8083063 Iustin Pop
2563 a8083063 Iustin Pop
2564 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2565 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2566 fe7b0351 Michael Hanselmann

2567 fe7b0351 Michael Hanselmann
  """
2568 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2569 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2570 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2571 4e0b4d2d Guido Trotter
  REQ_BGL = False
2572 4e0b4d2d Guido Trotter
2573 4e0b4d2d Guido Trotter
  def ExpandNames(self):
2574 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
2575 fe7b0351 Michael Hanselmann
2576 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2577 fe7b0351 Michael Hanselmann
    """Build hooks env.
2578 fe7b0351 Michael Hanselmann

2579 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2580 fe7b0351 Michael Hanselmann

2581 fe7b0351 Michael Hanselmann
    """
2582 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2583 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2584 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2585 fe7b0351 Michael Hanselmann
    return env, nl, nl
2586 fe7b0351 Michael Hanselmann
2587 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2588 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2589 fe7b0351 Michael Hanselmann

2590 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2591 fe7b0351 Michael Hanselmann

2592 fe7b0351 Michael Hanselmann
    """
2593 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2594 4e0b4d2d Guido Trotter
    assert instance is not None, \
2595 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2596 4e0b4d2d Guido Trotter
2597 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2598 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2599 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2600 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2601 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2602 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2603 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2604 72737a7f Iustin Pop
                                              instance.name,
2605 72737a7f Iustin Pop
                                              instance.hypervisor)
2606 fe7b0351 Michael Hanselmann
    if remote_info:
2607 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2608 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2609 3ecf6786 Iustin Pop
                                  instance.primary_node))
2610 d0834de3 Michael Hanselmann
2611 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2612 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2613 d0834de3 Michael Hanselmann
      # OS verification
2614 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2615 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2616 d0834de3 Michael Hanselmann
      if pnode is None:
2617 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2618 3ecf6786 Iustin Pop
                                   self.op.pnode)
2619 72737a7f Iustin Pop
      os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
2620 dfa96ded Guido Trotter
      if not os_obj:
2621 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2622 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2623 d0834de3 Michael Hanselmann
2624 fe7b0351 Michael Hanselmann
    self.instance = instance
2625 fe7b0351 Michael Hanselmann
2626 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2627 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2628 fe7b0351 Michael Hanselmann

2629 fe7b0351 Michael Hanselmann
    """
2630 fe7b0351 Michael Hanselmann
    inst = self.instance
2631 fe7b0351 Michael Hanselmann
2632 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2633 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2634 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2635 97abc79f Iustin Pop
      self.cfg.Update(inst)
2636 d0834de3 Michael Hanselmann
2637 b9bddb6b Iustin Pop
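    # the OS create scripts need the instance's disks to be active; they are
    # shut down again in the finally clause below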
    _StartInstanceDisks(self, inst, None)
2638 fe7b0351 Michael Hanselmann
    try:
2639 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2640 bb2ee932 Iustin Pop
      if not self.rpc.call_instance_os_add(inst.primary_node, inst):
2641 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Could not install OS for instance %s"
2642 f4bc1f2c Michael Hanselmann
                                 " on node %s" %
2643 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2644 fe7b0351 Michael Hanselmann
    finally:
2645 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
2646 fe7b0351 Michael Hanselmann
2647 fe7b0351 Michael Hanselmann
2648 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2649 decd5f45 Iustin Pop
  """Rename an instance.
2650 decd5f45 Iustin Pop

2651 decd5f45 Iustin Pop
  """
2652 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2653 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2654 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2655 decd5f45 Iustin Pop
2656 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2657 decd5f45 Iustin Pop
    """Build hooks env.
2658 decd5f45 Iustin Pop

2659 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2660 decd5f45 Iustin Pop

2661 decd5f45 Iustin Pop
    """
2662 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2663 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2664 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2665 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2666 decd5f45 Iustin Pop
    return env, nl, nl
2667 decd5f45 Iustin Pop
2668 decd5f45 Iustin Pop
  def CheckPrereq(self):
2669 decd5f45 Iustin Pop
    """Check prerequisites.
2670 decd5f45 Iustin Pop

2671 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2672 decd5f45 Iustin Pop

2673 decd5f45 Iustin Pop
    """
2674 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2675 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2676 decd5f45 Iustin Pop
    if instance is None:
2677 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2678 decd5f45 Iustin Pop
                                 self.op.instance_name)
2679 decd5f45 Iustin Pop
    if instance.status != "down":
2680 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2681 decd5f45 Iustin Pop
                                 self.op.instance_name)
2682 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2683 72737a7f Iustin Pop
                                              instance.name,
2684 72737a7f Iustin Pop
                                              instance.hypervisor)
2685 decd5f45 Iustin Pop
    if remote_info:
2686 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2687 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2688 decd5f45 Iustin Pop
                                  instance.primary_node))
2689 decd5f45 Iustin Pop
    self.instance = instance
2690 decd5f45 Iustin Pop
2691 decd5f45 Iustin Pop
    # new name verification
2692 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2693 decd5f45 Iustin Pop
2694 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2695 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
2696 7bde3275 Guido Trotter
    if new_name in instance_list:
2697 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2698 c09f363f Manuel Franceschini
                                 new_name)
2699 7bde3275 Guido Trotter
2700 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2701 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
2702 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2703 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2704 decd5f45 Iustin Pop
2705 decd5f45 Iustin Pop
2706 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2707 decd5f45 Iustin Pop
    """Reinstall the instance.
2708 decd5f45 Iustin Pop

2709 decd5f45 Iustin Pop
    """
2710 decd5f45 Iustin Pop
    inst = self.instance
2711 decd5f45 Iustin Pop
    old_name = inst.name
2712 decd5f45 Iustin Pop
2713 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2714 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2715 b23c4333 Manuel Franceschini
2716 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2717 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
2718 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
2719 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
2720 decd5f45 Iustin Pop
2721 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2722 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2723 decd5f45 Iustin Pop
2724 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2725 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2726 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
2727 72737a7f Iustin Pop
                                                     old_file_storage_dir,
2728 72737a7f Iustin Pop
                                                     new_file_storage_dir)
2729 b23c4333 Manuel Franceschini
2730 b23c4333 Manuel Franceschini
      if not result:
2731 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2732 b23c4333 Manuel Franceschini
                                 " directory '%s' to '%s' (but the instance"
2733 b23c4333 Manuel Franceschini
                                 " has been renamed in Ganeti)" % (
2734 b23c4333 Manuel Franceschini
                                 inst.primary_node, old_file_storage_dir,
2735 b23c4333 Manuel Franceschini
                                 new_file_storage_dir))
2736 b23c4333 Manuel Franceschini
2737 b23c4333 Manuel Franceschini
      if not result[0]:
2738 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2739 b23c4333 Manuel Franceschini
                                 " (but the instance has been renamed in"
2740 b23c4333 Manuel Franceschini
                                 " Ganeti)" % (old_file_storage_dir,
2741 b23c4333 Manuel Franceschini
                                               new_file_storage_dir))
2742 b23c4333 Manuel Franceschini
2743 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
2744 decd5f45 Iustin Pop
    try:
2745 72737a7f Iustin Pop
      if not self.rpc.call_instance_run_rename(inst.primary_node, inst,
2746 d15a9ad3 Guido Trotter
                                               old_name):
2747 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
2748 6291574d Alexander Schreiber
               " (but the instance has been renamed in Ganeti)" %
2749 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2750 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
2751 decd5f45 Iustin Pop
    finally:
2752 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
2753 decd5f45 Iustin Pop
2754 decd5f45 Iustin Pop
2755 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2756 a8083063 Iustin Pop
  """Remove an instance.
2757 a8083063 Iustin Pop

2758 a8083063 Iustin Pop
  """
2759 a8083063 Iustin Pop
  HPATH = "instance-remove"
2760 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2761 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
2762 cf472233 Guido Trotter
  REQ_BGL = False
2763 cf472233 Guido Trotter
2764 cf472233 Guido Trotter
  def ExpandNames(self):
2765 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
2766 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2767 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2768 cf472233 Guido Trotter
2769 cf472233 Guido Trotter
  def DeclareLocks(self, level):
2770 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
2771 cf472233 Guido Trotter
      self._LockInstancesNodes()
2772 a8083063 Iustin Pop
2773 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2774 a8083063 Iustin Pop
    """Build hooks env.
2775 a8083063 Iustin Pop

2776 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2777 a8083063 Iustin Pop

2778 a8083063 Iustin Pop
    """
2779 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2780 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
2781 a8083063 Iustin Pop
    return env, nl, nl
2782 a8083063 Iustin Pop
2783 a8083063 Iustin Pop
  def CheckPrereq(self):
2784 a8083063 Iustin Pop
    """Check prerequisites.
2785 a8083063 Iustin Pop

2786 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2787 a8083063 Iustin Pop

2788 a8083063 Iustin Pop
    """
2789 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2790 cf472233 Guido Trotter
    assert self.instance is not None, \
2791 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2792 a8083063 Iustin Pop
2793 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2794 a8083063 Iustin Pop
    """Remove the instance.
2795 a8083063 Iustin Pop

2796 a8083063 Iustin Pop
    """
2797 a8083063 Iustin Pop
    instance = self.instance
2798 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
2799 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
2800 a8083063 Iustin Pop
2801 72737a7f Iustin Pop
    if not self.rpc.call_instance_shutdown(instance.primary_node, instance):
2802 1d67656e Iustin Pop
      if self.op.ignore_failures:
2803 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2804 1d67656e Iustin Pop
      else:
2805 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2806 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2807 a8083063 Iustin Pop
2808 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
2809 a8083063 Iustin Pop
2810 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
2811 1d67656e Iustin Pop
      if self.op.ignore_failures:
2812 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2813 1d67656e Iustin Pop
      else:
2814 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2815 a8083063 Iustin Pop
2816 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
2817 a8083063 Iustin Pop
2818 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2819 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2820 a8083063 Iustin Pop
2821 a8083063 Iustin Pop
2822 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2823 a8083063 Iustin Pop
  """Logical unit for querying instances.
2824 a8083063 Iustin Pop

2825 a8083063 Iustin Pop
  """
2826 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2827 7eb9d8f7 Guido Trotter
  REQ_BGL = False
2828 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
2829 a2d2e1a7 Iustin Pop
                                    "admin_state", "admin_ram",
2830 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
2831 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
2832 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
2833 a2d2e1a7 Iustin Pop
                                    "(disk).(size)/([0-9]+)",
2834 a2d2e1a7 Iustin Pop
                                    "(disk).(sizes)",
2835 a2d2e1a7 Iustin Pop
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
2836 a2d2e1a7 Iustin Pop
                                    "(nic).(macs|ips|bridges)",
2837 a2d2e1a7 Iustin Pop
                                    "(disk|nic).(count)",
2838 a2d2e1a7 Iustin Pop
                                    "serial_no", "hypervisor", "hvparams",] +
2839 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
2840 a2d2e1a7 Iustin Pop
                                   for name in constants.HVS_PARAMETERS] +
2841 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
2842 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
2843 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
2844 31bf511f Iustin Pop
2845 a8083063 Iustin Pop
2846 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
2847 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2848 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2849 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2850 a8083063 Iustin Pop
2851 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
2852 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
2853 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2854 7eb9d8f7 Guido Trotter
2855 57a2fb91 Iustin Pop
    if self.op.names:
2856 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
2857 7eb9d8f7 Guido Trotter
    else:
2858 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
2859 7eb9d8f7 Guido Trotter
2860 31bf511f Iustin Pop
    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2861 57a2fb91 Iustin Pop
    if self.do_locking:
2862 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
2863 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
2864 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
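      # the node locks are declared empty here and recomputed in DeclareLocks
      # via _LockInstancesNodes, so only the nodes of the locked instances
      # end up locked when live data has to be fetched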
2865 7eb9d8f7 Guido Trotter
2866 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
2867 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
2868 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
2869 7eb9d8f7 Guido Trotter
2870 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
2871 7eb9d8f7 Guido Trotter
    """Check prerequisites.
2872 7eb9d8f7 Guido Trotter

2873 7eb9d8f7 Guido Trotter
    """
2874 57a2fb91 Iustin Pop
    pass
2875 069dcc86 Iustin Pop
2876 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2877 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2878 a8083063 Iustin Pop

2879 a8083063 Iustin Pop
    """
2880 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
2881 57a2fb91 Iustin Pop
    if self.do_locking:
2882 57a2fb91 Iustin Pop
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2883 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2884 3fa93523 Guido Trotter
      instance_names = self.wanted
2885 3fa93523 Guido Trotter
      missing = set(instance_names).difference(all_info.keys())
2886 3fa93523 Guido Trotter
      if missing:
2887 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2888 3fa93523 Guido Trotter
          "Some instances were removed before retrieving their data: %s"
2889 3fa93523 Guido Trotter
          % missing)
2890 57a2fb91 Iustin Pop
    else:
2891 57a2fb91 Iustin Pop
      instance_names = all_info.keys()
2892 c1f1cbb2 Iustin Pop
2893 c1f1cbb2 Iustin Pop
    instance_names = utils.NiceSort(instance_names)
2894 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
2895 a8083063 Iustin Pop
2896 a8083063 Iustin Pop
    # begin data gathering
2897 a8083063 Iustin Pop
2898 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2899 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
2900 a8083063 Iustin Pop
2901 a8083063 Iustin Pop
    bad_nodes = []
2902 31bf511f Iustin Pop
    if self.do_locking:
2903 a8083063 Iustin Pop
      live_data = {}
2904 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
2905 a8083063 Iustin Pop
      for name in nodes:
2906 a8083063 Iustin Pop
        result = node_data[name]
2907 a8083063 Iustin Pop
        if result:
2908 a8083063 Iustin Pop
          live_data.update(result)
2909 a8083063 Iustin Pop
        elif result == False:
2910 a8083063 Iustin Pop
          bad_nodes.append(name)
2911 a8083063 Iustin Pop
        # else no instance is alive
2912 a8083063 Iustin Pop
    else:
2913 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2914 a8083063 Iustin Pop
2915 a8083063 Iustin Pop
    # end data gathering
2916 a8083063 Iustin Pop
2917 5018a335 Iustin Pop
    HVPREFIX = "hv/"
2918 338e51e8 Iustin Pop
    BEPREFIX = "be/"
2919 a8083063 Iustin Pop
    output = []
2920 a8083063 Iustin Pop
    for instance in instance_list:
2921 a8083063 Iustin Pop
      iout = []
2922 5018a335 Iustin Pop
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
2923 338e51e8 Iustin Pop
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
2924 a8083063 Iustin Pop
      for field in self.op.output_fields:
2925 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
2926 a8083063 Iustin Pop
        if field == "name":
2927 a8083063 Iustin Pop
          val = instance.name
2928 a8083063 Iustin Pop
        elif field == "os":
2929 a8083063 Iustin Pop
          val = instance.os
2930 a8083063 Iustin Pop
        elif field == "pnode":
2931 a8083063 Iustin Pop
          val = instance.primary_node
2932 a8083063 Iustin Pop
        elif field == "snodes":
2933 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2934 a8083063 Iustin Pop
        elif field == "admin_state":
2935 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2936 a8083063 Iustin Pop
        elif field == "oper_state":
2937 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2938 8a23d2d3 Iustin Pop
            val = None
2939 a8083063 Iustin Pop
          else:
2940 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2941 d8052456 Iustin Pop
        elif field == "status":
2942 d8052456 Iustin Pop
          if instance.primary_node in bad_nodes:
2943 d8052456 Iustin Pop
            val = "ERROR_nodedown"
2944 d8052456 Iustin Pop
          else:
2945 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
2946 d8052456 Iustin Pop
            if running:
2947 d8052456 Iustin Pop
              if instance.status != "down":
2948 d8052456 Iustin Pop
                val = "running"
2949 d8052456 Iustin Pop
              else:
2950 d8052456 Iustin Pop
                val = "ERROR_up"
2951 d8052456 Iustin Pop
            else:
2952 d8052456 Iustin Pop
              if instance.status != "down":
2953 d8052456 Iustin Pop
                val = "ERROR_down"
2954 d8052456 Iustin Pop
              else:
2955 d8052456 Iustin Pop
                val = "ADMIN_down"
2956 a8083063 Iustin Pop
        elif field == "oper_ram":
2957 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2958 8a23d2d3 Iustin Pop
            val = None
2959 a8083063 Iustin Pop
          elif instance.name in live_data:
2960 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2961 a8083063 Iustin Pop
          else:
2962 a8083063 Iustin Pop
            val = "-"
2963 a8083063 Iustin Pop
        elif field == "disk_template":
2964 a8083063 Iustin Pop
          val = instance.disk_template
2965 a8083063 Iustin Pop
        elif field == "ip":
2966 a8083063 Iustin Pop
          val = instance.nics[0].ip
2967 a8083063 Iustin Pop
        elif field == "bridge":
2968 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2969 a8083063 Iustin Pop
        elif field == "mac":
2970 a8083063 Iustin Pop
          val = instance.nics[0].mac
2971 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2972 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
2973 ad24e046 Iustin Pop
          try:
2974 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
2975 ad24e046 Iustin Pop
          except errors.OpPrereqError:
2976 8a23d2d3 Iustin Pop
            val = None
2977 130a6a6f Iustin Pop
        elif field == "tags":
2978 130a6a6f Iustin Pop
          val = list(instance.GetTags())
2979 38d7239a Iustin Pop
        elif field == "serial_no":
2980 38d7239a Iustin Pop
          val = instance.serial_no
2981 5018a335 Iustin Pop
        elif field == "network_port":
2982 5018a335 Iustin Pop
          val = instance.network_port
2983 338e51e8 Iustin Pop
        elif field == "hypervisor":
2984 338e51e8 Iustin Pop
          val = instance.hypervisor
2985 338e51e8 Iustin Pop
        elif field == "hvparams":
2986 338e51e8 Iustin Pop
          val = i_hv
2987 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
2988 5018a335 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
2989 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
2990 338e51e8 Iustin Pop
        elif field == "beparams":
2991 338e51e8 Iustin Pop
          val = i_be
2992 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
2993 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
2994 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
2995 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
2996 71c1af58 Iustin Pop
          # matches a variable list
2997 71c1af58 Iustin Pop
          st_groups = st_match.groups()
2998 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
2999 71c1af58 Iustin Pop
            if st_groups[1] == "count":
3000 71c1af58 Iustin Pop
              val = len(instance.disks)
3001 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
3002 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
3003 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
3004 3e0cea06 Iustin Pop
              try:
3005 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
3006 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
3007 71c1af58 Iustin Pop
                val = None
3008 71c1af58 Iustin Pop
            else:
3009 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
3010 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
3011 71c1af58 Iustin Pop
            if st_groups[1] == "count":
3012 71c1af58 Iustin Pop
              val = len(instance.nics)
3013 41a776da Iustin Pop
            elif st_groups[1] == "macs":
3014 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
3015 41a776da Iustin Pop
            elif st_groups[1] == "ips":
3016 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
3017 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
3018 41a776da Iustin Pop
              val = [nic.bridge for nic in instance.nics]
3019 71c1af58 Iustin Pop
            else:
3020 71c1af58 Iustin Pop
              # index-based item
3021 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
3022 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
3023 71c1af58 Iustin Pop
                val = None
3024 71c1af58 Iustin Pop
              else:
3025 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
3026 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
3027 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
3028 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
3029 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
3030 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].bridge
3031 71c1af58 Iustin Pop
                else:
3032 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
3033 71c1af58 Iustin Pop
          else:
3034 71c1af58 Iustin Pop
            assert False, "Unhandled variable parameter"
3035 a8083063 Iustin Pop
        else:
3036 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
3037 a8083063 Iustin Pop
        iout.append(val)
3038 a8083063 Iustin Pop
      output.append(iout)
3039 a8083063 Iustin Pop
3040 a8083063 Iustin Pop
    return output
3041 a8083063 Iustin Pop
3042 a8083063 Iustin Pop
3043 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
3044 a8083063 Iustin Pop
  """Failover an instance.
3045 a8083063 Iustin Pop

3046 a8083063 Iustin Pop
  """
3047 a8083063 Iustin Pop
  HPATH = "instance-failover"
3048 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3049 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
3050 c9e5c064 Guido Trotter
  REQ_BGL = False
3051 c9e5c064 Guido Trotter
3052 c9e5c064 Guido Trotter
  def ExpandNames(self):
3053 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
3054 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3055 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3056 c9e5c064 Guido Trotter
3057 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
3058 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
3059 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
3060 a8083063 Iustin Pop
3061 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3062 a8083063 Iustin Pop
    """Build hooks env.
3063 a8083063 Iustin Pop

3064 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3065 a8083063 Iustin Pop

3066 a8083063 Iustin Pop
    """
3067 a8083063 Iustin Pop
    env = {
3068 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
3069 a8083063 Iustin Pop
      }
3070 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3071 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3072 a8083063 Iustin Pop
    return env, nl, nl
3073 a8083063 Iustin Pop
3074 a8083063 Iustin Pop
  def CheckPrereq(self):
3075 a8083063 Iustin Pop
    """Check prerequisites.
3076 a8083063 Iustin Pop

3077 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3078 a8083063 Iustin Pop

3079 a8083063 Iustin Pop
    """
3080 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3081 c9e5c064 Guido Trotter
    assert self.instance is not None, \
3082 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3083 a8083063 Iustin Pop
3084 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
3085 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3086 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
3087 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
3088 2a710df1 Michael Hanselmann
3089 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
3090 2a710df1 Michael Hanselmann
    if not secondary_nodes:
3091 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
3092 abdf0113 Iustin Pop
                                   "a mirrored disk template")
3093 2a710df1 Michael Hanselmann
3094 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
3095 d4f16fd9 Iustin Pop
    # check memory requirements on the secondary node
3096 b9bddb6b Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
3097 338e51e8 Iustin Pop
                         instance.name, bep[constants.BE_MEMORY],
3098 e69d05fd Iustin Pop
                         instance.hypervisor)
3099 3a7c308e Guido Trotter
3100 a8083063 Iustin Pop
    # check bridge existance
3101 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
3102 72737a7f Iustin Pop
    if not self.rpc.call_bridges_exist(target_node, brlist):
3103 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
3104 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
3105 50ff9a7a Iustin Pop
                                 (brlist, target_node))
3106 a8083063 Iustin Pop
3107 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3108 a8083063 Iustin Pop
    """Failover an instance.
3109 a8083063 Iustin Pop

3110 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
3111 a8083063 Iustin Pop
    starting it on the secondary.
3112 a8083063 Iustin Pop

3113 a8083063 Iustin Pop
    """
3114 a8083063 Iustin Pop
    instance = self.instance
3115 a8083063 Iustin Pop
3116 a8083063 Iustin Pop
    source_node = instance.primary_node
3117 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
3118 a8083063 Iustin Pop
3119 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
3120 a8083063 Iustin Pop
    for dev in instance.disks:
3121 abdf0113 Iustin Pop
      # for DRBD, these are DRBD devices over LVM
3122 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
3123 a0aaa0d0 Guido Trotter
        if instance.status == "up" and not self.op.ignore_consistency:
3124 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
3125 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
3126 a8083063 Iustin Pop
3127 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
3128 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
3129 9a4f63d1 Iustin Pop
                 instance.name, source_node)
3130 a8083063 Iustin Pop
3131 72737a7f Iustin Pop
    if not self.rpc.call_instance_shutdown(source_node, instance):
3132 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
3133 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
3134 86d9d3bb Iustin Pop
                             " Proceeding"
3135 86d9d3bb Iustin Pop
                             " anyway. Please make sure node %s is down",
3136 86d9d3bb Iustin Pop
                             instance.name, source_node, source_node)
3137 24a40d57 Iustin Pop
      else:
3138 24a40d57 Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
3139 24a40d57 Iustin Pop
                                 (instance.name, source_node))
3140 a8083063 Iustin Pop
3141 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
3142 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
3143 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
3144 a8083063 Iustin Pop
3145 a8083063 Iustin Pop
    instance.primary_node = target_node
3146 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
3147 b6102dab Guido Trotter
    self.cfg.Update(instance)
3148 a8083063 Iustin Pop
3149 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
3150 12a0cfbe Guido Trotter
    if instance.status == "up":
3151 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
3152 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
3153 9a4f63d1 Iustin Pop
                   instance.name, target_node)
3154 12a0cfbe Guido Trotter
3155 b9bddb6b Iustin Pop
      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
3156 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
3157 12a0cfbe Guido Trotter
      if not disks_ok:
3158 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3159 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
3160 a8083063 Iustin Pop
3161 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
3162 72737a7f Iustin Pop
      if not self.rpc.call_instance_start(target_node, instance, None):
3163 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3164 12a0cfbe Guido Trotter
        raise errors.OpExecError("Could not start instance %s on node %s." %
3165 12a0cfbe Guido Trotter
                                 (instance.name, target_node))
3166 a8083063 Iustin Pop
3167 a8083063 Iustin Pop
3168 b9bddb6b Iustin Pop
def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
3169 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
3170 a8083063 Iustin Pop

3171 a8083063 Iustin Pop
  This always creates all devices.
3172 a8083063 Iustin Pop

3173 a8083063 Iustin Pop
  """
3174 a8083063 Iustin Pop
  if device.children:
3175 a8083063 Iustin Pop
    for child in device.children:
3176 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
3177 a8083063 Iustin Pop
        return False
3178 a8083063 Iustin Pop
3179 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3180 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3181 72737a7f Iustin Pop
                                       instance.name, True, info)
3182 a8083063 Iustin Pop
  if not new_id:
3183 a8083063 Iustin Pop
    return False
3184 a8083063 Iustin Pop
  if device.physical_id is None:
3185 a8083063 Iustin Pop
    device.physical_id = new_id
3186 a8083063 Iustin Pop
  return True
3187 a8083063 Iustin Pop
3188 a8083063 Iustin Pop
3189 b9bddb6b Iustin Pop
def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
3190 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
3191 a8083063 Iustin Pop

3192 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
3193 a8083063 Iustin Pop
  all its children.
3194 a8083063 Iustin Pop

3195 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
3196 a8083063 Iustin Pop

3197 a8083063 Iustin Pop
  """
3198 a8083063 Iustin Pop
  if device.CreateOnSecondary():
3199 a8083063 Iustin Pop
    force = True
3200 a8083063 Iustin Pop
  if device.children:
3201 a8083063 Iustin Pop
    for child in device.children:
3202 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(lu, node, instance,
3203 3f78eef2 Iustin Pop
                                        child, force, info):
3204 a8083063 Iustin Pop
        return False
3205 a8083063 Iustin Pop
3206 a8083063 Iustin Pop
  if not force:
3207 a8083063 Iustin Pop
    return True
3208 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3209 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3210 72737a7f Iustin Pop
                                       instance.name, False, info)
3211 a8083063 Iustin Pop
  if not new_id:
3212 a8083063 Iustin Pop
    return False
3213 a8083063 Iustin Pop
  if device.physical_id is None:
3214 a8083063 Iustin Pop
    device.physical_id = new_id
3215 a8083063 Iustin Pop
  return True
3216 a8083063 Iustin Pop
3217 a8083063 Iustin Pop
3218 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
3219 923b1523 Iustin Pop
  """Generate a suitable LV name.
3220 923b1523 Iustin Pop

3221 923b1523 Iustin Pop
  This will generate one unique logical volume name for each requested suffix.
3222 923b1523 Iustin Pop

3223 923b1523 Iustin Pop
  """
3224 923b1523 Iustin Pop
  results = []
3225 923b1523 Iustin Pop
  for val in exts:
3226 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
3227 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
3228 923b1523 Iustin Pop
  return results
3229 923b1523 Iustin Pop
3230 923b1523 Iustin Pop
3231 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
3232 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
3233 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
3234 a1f445d3 Iustin Pop

3235 a1f445d3 Iustin Pop
  """
3236 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
3237 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
3238 b9bddb6b Iustin Pop
  shared_secret = lu.cfg.GenerateDRBDSecret()
3239 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3240 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
3241 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3242 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
3243 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
3244 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
3245 f9518d38 Iustin Pop
                                      p_minor, s_minor,
3246 f9518d38 Iustin Pop
                                      shared_secret),
3247 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
3248 a1f445d3 Iustin Pop
                          iv_name=iv_name)
3249 a1f445d3 Iustin Pop
  return drbd_dev
3250 a1f445d3 Iustin Pop
3251 7c0d6283 Michael Hanselmann
3252 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
3253 a8083063 Iustin Pop
                          instance_name, primary_node,
3254 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
3255 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
3256 e2a65344 Iustin Pop
                          base_index):
3257 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
3258 a8083063 Iustin Pop

3259 a8083063 Iustin Pop
  """
3260 a8083063 Iustin Pop
  #TODO: compute space requirements
3261 a8083063 Iustin Pop
3262 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
3263 08db7c5c Iustin Pop
  disk_count = len(disk_info)
3264 08db7c5c Iustin Pop
  disks = []
3265 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
3266 08db7c5c Iustin Pop
    pass
3267 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
3268 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
3269 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
3270 923b1523 Iustin Pop
3271 08db7c5c Iustin Pop
    names = _GenerateUniqueNames(lu, [".disk%d" % i
3272 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
3273 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
3274 e2a65344 Iustin Pop
      disk_index = idx + base_index
3275 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
3276 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
3277 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index)
3278 08db7c5c Iustin Pop
      disks.append(disk_dev)
3279 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
3280 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
3281 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
3282 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
3283 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
3284 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
3285 08db7c5c Iustin Pop
3286 08db7c5c Iustin Pop
    names = _GenerateUniqueNames(lu,
3287 08db7c5c Iustin Pop
                                 [".disk%d_%s" % (i, s)
3288 08db7c5c Iustin Pop
                                  for i in range(disk_count)
3289 08db7c5c Iustin Pop
                                  for s in ("data", "meta")
3290 08db7c5c Iustin Pop
                                  ])
3291 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
3292 112050d9 Iustin Pop
      disk_index = idx + base_index
3293 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
3294 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
3295 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
3296 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
3297 08db7c5c Iustin Pop
      disks.append(disk_dev)
3298 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
3299 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
3300 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
3301 0f1a06e3 Manuel Franceschini
3302 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
3303 112050d9 Iustin Pop
      disk_index = idx + base_index
3304 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
3305 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
3306 08db7c5c Iustin Pop
                              logical_id=(file_driver,
3307 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
3308 08db7c5c Iustin Pop
                                                         idx)))
3309 08db7c5c Iustin Pop
      disks.append(disk_dev)
3310 a8083063 Iustin Pop
  else:
3311 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3312 a8083063 Iustin Pop
  return disks
3313 a8083063 Iustin Pop
3314 a8083063 Iustin Pop
3315 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3316 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3317 3ecf6786 Iustin Pop

3318 3ecf6786 Iustin Pop
  """
3319 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3320 a0c3fea1 Michael Hanselmann
3321 a0c3fea1 Michael Hanselmann
3322 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
3323 a8083063 Iustin Pop
  """Create all disks for an instance.
3324 a8083063 Iustin Pop

3325 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
3326 a8083063 Iustin Pop

3327 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3328 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3329 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3330 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
3331 e4376078 Iustin Pop
  @rtype: boolean
3332 e4376078 Iustin Pop
  @return: the success of the creation
3333 a8083063 Iustin Pop

3334 a8083063 Iustin Pop
  """
3335 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
3336 a0c3fea1 Michael Hanselmann
3337 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3338 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3339 72737a7f Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(instance.primary_node,
3340 72737a7f Iustin Pop
                                                 file_storage_dir)
3341 0f1a06e3 Manuel Franceschini
3342 0f1a06e3 Manuel Franceschini
    if not result:
3343 9a4f63d1 Iustin Pop
      logging.error("Could not connect to node '%s'", instance.primary_node)
3344 0f1a06e3 Manuel Franceschini
      return False
3345 0f1a06e3 Manuel Franceschini
3346 0f1a06e3 Manuel Franceschini
    if not result[0]:
3347 9a4f63d1 Iustin Pop
      logging.error("Failed to create directory '%s'", file_storage_dir)
3348 0f1a06e3 Manuel Franceschini
      return False
3349 0f1a06e3 Manuel Franceschini
3350 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
3351 24991749 Iustin Pop
  # LUSetInstanceParams
3352 a8083063 Iustin Pop
  for device in instance.disks:
3353 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
3354 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
3355 a8083063 Iustin Pop
    #HARDCODE
3356 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
3357 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(lu, secondary_node, instance,
3358 3f78eef2 Iustin Pop
                                        device, False, info):
3359 9a4f63d1 Iustin Pop
        logging.error("Failed to create volume %s (%s) on secondary node %s!",
3360 9a4f63d1 Iustin Pop
                      device.iv_name, device, secondary_node)
3361 a8083063 Iustin Pop
        return False
3362 a8083063 Iustin Pop
    #HARDCODE
3363 b9bddb6b Iustin Pop
    if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
3364 3f78eef2 Iustin Pop
                                    instance, device, info):
3365 9a4f63d1 Iustin Pop
      logging.error("Failed to create volume %s on primary!", device.iv_name)
3366 a8083063 Iustin Pop
      return False
3367 1c6e3627 Manuel Franceschini
3368 a8083063 Iustin Pop
  return True
3369 a8083063 Iustin Pop
3370 a8083063 Iustin Pop
3371 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
3372 a8083063 Iustin Pop
  """Remove all disks for an instance.
3373 a8083063 Iustin Pop

3374 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
3375 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
3376 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
3377 a8083063 Iustin Pop
  with `_CreateDisks()`).
3378 a8083063 Iustin Pop

3379 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3380 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3381 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3382 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
3383 e4376078 Iustin Pop
  @rtype: boolean
3384 e4376078 Iustin Pop
  @return: the success of the removal
3385 a8083063 Iustin Pop

3386 a8083063 Iustin Pop
  """
3387 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
3388 a8083063 Iustin Pop
3389 a8083063 Iustin Pop
  result = True
3390 a8083063 Iustin Pop
  for device in instance.disks:
3391 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
3392 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
3393 72737a7f Iustin Pop
      if not lu.rpc.call_blockdev_remove(node, disk):
3394 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not remove block device %s on node %s,"
3395 86d9d3bb Iustin Pop
                           " continuing anyway", device.iv_name, node)
3396 a8083063 Iustin Pop
        result = False
3397 0f1a06e3 Manuel Franceschini
3398 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3399 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3400 72737a7f Iustin Pop
    if not lu.rpc.call_file_storage_dir_remove(instance.primary_node,
3401 72737a7f Iustin Pop
                                               file_storage_dir):
3402 9a4f63d1 Iustin Pop
      logging.error("Could not remove directory '%s'", file_storage_dir)
3403 0f1a06e3 Manuel Franceschini
      result = False
3404 0f1a06e3 Manuel Franceschini
3405 a8083063 Iustin Pop
  return result
3406 a8083063 Iustin Pop
3407 a8083063 Iustin Pop
3408 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
3409 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
3410 e2fe6369 Iustin Pop

3411 e2fe6369 Iustin Pop
  """
3412 e2fe6369 Iustin Pop
  # Required free disk space as a function of the requested disks
3413 e2fe6369 Iustin Pop
  req_size_dict = {
3414 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
3415 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
3416 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
3417 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
3418 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
3419 e2fe6369 Iustin Pop
  }
3420 e2fe6369 Iustin Pop
3421 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
3422 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
3423 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
3424 e2fe6369 Iustin Pop
3425 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
3426 e2fe6369 Iustin Pop
3427 e2fe6369 Iustin Pop
3428 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
3429 74409b12 Iustin Pop
  """Hypervisor parameter validation.
3430 74409b12 Iustin Pop

3431 74409b12 Iustin Pop
  This function abstracts the hypervisor parameter validation to be
3432 74409b12 Iustin Pop
  used in both instance create and instance modify.
3433 74409b12 Iustin Pop

3434 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
3435 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
3436 74409b12 Iustin Pop
  @type nodenames: list
3437 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
3438 74409b12 Iustin Pop
  @type hvname: string
3439 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
3440 74409b12 Iustin Pop
  @type hvparams: dict
3441 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
3442 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
3443 74409b12 Iustin Pop

3444 74409b12 Iustin Pop
  """
3445 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
3446 74409b12 Iustin Pop
                                                  hvname,
3447 74409b12 Iustin Pop
                                                  hvparams)
3448 74409b12 Iustin Pop
  for node in nodenames:
3449 74409b12 Iustin Pop
    info = hvinfo.get(node, None)
3450 74409b12 Iustin Pop
    if not info or not isinstance(info, (tuple, list)):
3451 74409b12 Iustin Pop
      raise errors.OpPrereqError("Cannot get current information"
3452 74409b12 Iustin Pop
                                 " from node '%s' (%s)" % (node, info))
3453 74409b12 Iustin Pop
    if not info[0]:
3454 74409b12 Iustin Pop
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
3455 74409b12 Iustin Pop
                                 " %s" % info[1])
3456 74409b12 Iustin Pop
3457 74409b12 Iustin Pop
3458 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
3459 a8083063 Iustin Pop
  """Create an instance.
3460 a8083063 Iustin Pop

3461 a8083063 Iustin Pop
  """
3462 a8083063 Iustin Pop
  HPATH = "instance-add"
3463 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3464 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
3465 08db7c5c Iustin Pop
              "mode", "start",
3466 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
3467 338e51e8 Iustin Pop
              "hvparams", "beparams"]
3468 7baf741d Guido Trotter
  REQ_BGL = False
3469 7baf741d Guido Trotter
3470 7baf741d Guido Trotter
  def _ExpandNode(self, node):
3471 7baf741d Guido Trotter
    """Expands and checks one node name.
3472 7baf741d Guido Trotter

3473 7baf741d Guido Trotter
    """
3474 7baf741d Guido Trotter
    node_full = self.cfg.ExpandNodeName(node)
3475 7baf741d Guido Trotter
    if node_full is None:
3476 7baf741d Guido Trotter
      raise errors.OpPrereqError("Unknown node %s" % node)
3477 7baf741d Guido Trotter
    return node_full
3478 7baf741d Guido Trotter
3479 7baf741d Guido Trotter
  def ExpandNames(self):
3480 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
3481 7baf741d Guido Trotter

3482 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
3483 7baf741d Guido Trotter

3484 7baf741d Guido Trotter
    """
3485 7baf741d Guido Trotter
    self.needed_locks = {}
3486 7baf741d Guido Trotter
3487 7baf741d Guido Trotter
    # set optional parameters to none if they don't exist
3488 6785674e Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
3489 7baf741d Guido Trotter
      if not hasattr(self.op, attr):
3490 7baf741d Guido Trotter
        setattr(self.op, attr, None)
3491 7baf741d Guido Trotter
3492 4b2f38dd Iustin Pop
    # cheap checks, mostly valid constants given
3493 4b2f38dd Iustin Pop
3494 7baf741d Guido Trotter
    # verify creation mode
3495 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
3496 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
3497 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3498 7baf741d Guido Trotter
                                 self.op.mode)
3499 4b2f38dd Iustin Pop
3500 7baf741d Guido Trotter
    # disk template and mirror node verification
3501 7baf741d Guido Trotter
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3502 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid disk template name")
3503 7baf741d Guido Trotter
3504 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
3505 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
3506 4b2f38dd Iustin Pop
3507 8705eb96 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
3508 8705eb96 Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
3509 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
3510 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
3511 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
3512 4b2f38dd Iustin Pop
                                  ",".join(enabled_hvs)))
3513 4b2f38dd Iustin Pop
3514 6785674e Iustin Pop
    # check hypervisor parameter syntax (locally)
3515 6785674e Iustin Pop
3516 8705eb96 Iustin Pop
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
3517 8705eb96 Iustin Pop
                                  self.op.hvparams)
3518 6785674e Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
3519 8705eb96 Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
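    # the cluster defaults for the chosen hypervisor are overlaid with the
    # values from the opcode and the merged dict is syntax-checked locally;
    # per-node validation happens later via _CheckHVParams in CheckPrereq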
3520 6785674e Iustin Pop
3521 338e51e8 Iustin Pop
    # fill and remember the beparams dict
3522 338e51e8 Iustin Pop
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
3523 338e51e8 Iustin Pop
                                    self.op.beparams)
3524 338e51e8 Iustin Pop
3525 7baf741d Guido Trotter
    #### instance parameters check
3526 7baf741d Guido Trotter
3527 7baf741d Guido Trotter
    # instance name verification
3528 7baf741d Guido Trotter
    hostname1 = utils.HostInfo(self.op.instance_name)
3529 7baf741d Guido Trotter
    self.op.instance_name = instance_name = hostname1.name
3530 7baf741d Guido Trotter
3531 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
3532 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
3533 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
3534 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3535 7baf741d Guido Trotter
                                 instance_name)
3536 7baf741d Guido Trotter
3537 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
3538 7baf741d Guido Trotter
3539 08db7c5c Iustin Pop
    # NIC buildup
3540 08db7c5c Iustin Pop
    self.nics = []
3541 08db7c5c Iustin Pop
    for nic in self.op.nics:
3542 08db7c5c Iustin Pop
      # ip validity checks
3543 08db7c5c Iustin Pop
      ip = nic.get("ip", None)
3544 08db7c5c Iustin Pop
      if ip is None or ip.lower() == "none":
3545 08db7c5c Iustin Pop
        nic_ip = None
3546 08db7c5c Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
3547 08db7c5c Iustin Pop
        nic_ip = hostname1.ip
3548 08db7c5c Iustin Pop
      else:
3549 08db7c5c Iustin Pop
        if not utils.IsValidIP(ip):
3550 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
3551 08db7c5c Iustin Pop
                                     " like a valid IP" % ip)
3552 08db7c5c Iustin Pop
        nic_ip = ip
3553 08db7c5c Iustin Pop
3554 08db7c5c Iustin Pop
      # MAC address verification
3555 08db7c5c Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
3556 08db7c5c Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
3557 08db7c5c Iustin Pop
        if not utils.IsValidMac(mac.lower()):
3558 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
3559 08db7c5c Iustin Pop
                                     mac)
3560 08db7c5c Iustin Pop
      # bridge verification
3561 08db7c5c Iustin Pop
      bridge = nic.get("bridge", self.cfg.GetDefBridge())
3562 08db7c5c Iustin Pop
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))
3563 08db7c5c Iustin Pop
3564 08db7c5c Iustin Pop
    # disk checks/pre-build
3565 08db7c5c Iustin Pop
    self.disks = []
3566 08db7c5c Iustin Pop
    for disk in self.op.disks:
3567 08db7c5c Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
3568 08db7c5c Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
3569 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
3570 08db7c5c Iustin Pop
                                   mode)
3571 08db7c5c Iustin Pop
      size = disk.get("size", None)
3572 08db7c5c Iustin Pop
      if size is None:
3573 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Missing disk size")
3574 08db7c5c Iustin Pop
      try:
3575 08db7c5c Iustin Pop
        size = int(size)
3576 08db7c5c Iustin Pop
      except ValueError:
3577 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
3578 08db7c5c Iustin Pop
      self.disks.append({"size": size, "mode": mode})
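    # at this point self.disks is a normalized list such as
    # [{"size": 1024, "mode": constants.DISK_RDWR}, ...] (sizes in MB,
    # values illustrative), complementing the NIC objects built above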
3579 08db7c5c Iustin Pop
3580 7baf741d Guido Trotter
    # used in CheckPrereq for ip ping check
3581 7baf741d Guido Trotter
    self.check_ip = hostname1.ip
3582 7baf741d Guido Trotter
3583 7baf741d Guido Trotter
    # file storage checks
3584 7baf741d Guido Trotter
    if (self.op.file_driver and
3585 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
3586 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3587 7baf741d Guido Trotter
                                 self.op.file_driver)
3588 7baf741d Guido Trotter
3589 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3590 7baf741d Guido Trotter
      raise errors.OpPrereqError("File storage directory path not absolute")
3591 7baf741d Guido Trotter
3592 7baf741d Guido Trotter
    ### Node/iallocator related checks
3593 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3594 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3595 7baf741d Guido Trotter
                                 " node must be given")
3596 7baf741d Guido Trotter
3597 7baf741d Guido Trotter
    if self.op.iallocator:
3598 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3599 7baf741d Guido Trotter
    else:
3600 7baf741d Guido Trotter
      self.op.pnode = self._ExpandNode(self.op.pnode)
3601 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
3602 7baf741d Guido Trotter
      if self.op.snode is not None:
3603 7baf741d Guido Trotter
        self.op.snode = self._ExpandNode(self.op.snode)
3604 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
3605 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
3606 7baf741d Guido Trotter
3607 7baf741d Guido Trotter
    # in case of an import, lock the source node too
3608 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
3609 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
3610 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
3611 7baf741d Guido Trotter
3612 7baf741d Guido Trotter
      if src_node is None or src_path is None:
3613 7baf741d Guido Trotter
        raise errors.OpPrereqError("Importing an instance requires source"
3614 7baf741d Guido Trotter
                                   " node and path options")
3615 7baf741d Guido Trotter
3616 7baf741d Guido Trotter
      if not os.path.isabs(src_path):
3617 7baf741d Guido Trotter
        raise errors.OpPrereqError("The source path must be absolute")
3618 7baf741d Guido Trotter
3619 7baf741d Guido Trotter
      self.op.src_node = src_node = self._ExpandNode(src_node)
3620 7baf741d Guido Trotter
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
3621 7baf741d Guido Trotter
        self.needed_locks[locking.LEVEL_NODE].append(src_node)
3622 7baf741d Guido Trotter
3623 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
3624 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
3625 7baf741d Guido Trotter
        raise errors.OpPrereqError("No guest OS specified")
3626 a8083063 Iustin Pop
3627 538475ca Iustin Pop
  def _RunAllocator(self):
3628 538475ca Iustin Pop
    """Run the allocator based on input opcode.
3629 538475ca Iustin Pop

3630 538475ca Iustin Pop
    """
3631 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
3632 72737a7f Iustin Pop
    ial = IAllocator(self,
3633 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
3634 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
3635 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
3636 d1c2dd75 Iustin Pop
                     tags=[],
3637 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
3638 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
3639 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
3640 08db7c5c Iustin Pop
                     disks=self.disks,
3641 d1c2dd75 Iustin Pop
                     nics=nics,
3642 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
3643 29859cb7 Iustin Pop
                     )
3644 d1c2dd75 Iustin Pop
3645 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
3646 d1c2dd75 Iustin Pop
3647 d1c2dd75 Iustin Pop
    if not ial.success:
3648 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3649 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3650 d1c2dd75 Iustin Pop
                                                           ial.info))
3651 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3652 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3653 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
3654 97abc79f Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
3655 1ce4bbe3 René Nussbaumer
                                  ial.required_nodes))
3656 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
3657 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
3658 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
3659 86d9d3bb Iustin Pop
                 ", ".join(ial.nodes))
3660 27579978 Iustin Pop
    if ial.required_nodes == 2:
3661 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
3662 538475ca Iustin Pop
3663 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3664 a8083063 Iustin Pop
    """Build hooks env.
3665 a8083063 Iustin Pop

3666 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3667 a8083063 Iustin Pop

3668 a8083063 Iustin Pop
    """
3669 a8083063 Iustin Pop
    env = {
3670 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3671 08db7c5c Iustin Pop
      "INSTANCE_DISK_SIZE": ",".join(str(d["size"]) for d in self.disks),
3672 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
3673 a8083063 Iustin Pop
      }
3674 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3675 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
3676 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
3677 09acf207 Guido Trotter
      env["INSTANCE_SRC_IMAGES"] = self.src_images
3678 396e1b78 Michael Hanselmann
3679 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3680 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
3681 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
3682 396e1b78 Michael Hanselmann
      status=self.instance_status,
3683 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
3684 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
3685 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
3686 08db7c5c Iustin Pop
      nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
3687 396e1b78 Michael Hanselmann
    ))
3688 a8083063 Iustin Pop
3689 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
3690 a8083063 Iustin Pop
          self.secondaries)
3691 a8083063 Iustin Pop
    return env, nl, nl
3692 a8083063 Iustin Pop
3693 a8083063 Iustin Pop
3694 a8083063 Iustin Pop
  def CheckPrereq(self):
3695 a8083063 Iustin Pop
    """Check prerequisites.
3696 a8083063 Iustin Pop

3697 a8083063 Iustin Pop
    """
3698 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3699 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3700 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3701 eedc99de Manuel Franceschini
                                 " instances")
3702 eedc99de Manuel Franceschini
3703 e69d05fd Iustin Pop
3704 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3705 7baf741d Guido Trotter
      src_node = self.op.src_node
3706 7baf741d Guido Trotter
      src_path = self.op.src_path
3707 a8083063 Iustin Pop
3708 72737a7f Iustin Pop
      export_info = self.rpc.call_export_info(src_node, src_path)
3709 a8083063 Iustin Pop
3710 a8083063 Iustin Pop
      if not export_info:
3711 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3712 a8083063 Iustin Pop
3713 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3714 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3715 a8083063 Iustin Pop
3716 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3717 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3718 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3719 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3720 a8083063 Iustin Pop
3721 09acf207 Guido Trotter
      # Check that the new instance doesn't have less disks than the export
3722 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
3723 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
3724 09acf207 Guido Trotter
      if instance_disks < export_disks:
3725 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
3726 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
3727 726d7d68 Iustin Pop
                                   (instance_disks, export_disks))
3728 a8083063 Iustin Pop
3729 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3730 09acf207 Guido Trotter
      disk_images = []
3731 09acf207 Guido Trotter
      for idx in range(export_disks):
3732 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
3733 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
3734 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
3735 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
3736 09acf207 Guido Trotter
          image = os.path.join(src_path, export_name)
3737 09acf207 Guido Trotter
          disk_images.append(image)
3738 09acf207 Guido Trotter
        else:
3739 09acf207 Guido Trotter
          disk_images.append(False)
3740 09acf207 Guido Trotter
3741 09acf207 Guido Trotter
      self.src_images = disk_images
3742 901a65c1 Iustin Pop
3743 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
3744 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
3745 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
3746 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
3747 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
3748 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
3749 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
3750 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
3751 bc89efc3 Guido Trotter
3752 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
3753 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3754 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3755 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3756 901a65c1 Iustin Pop
3757 901a65c1 Iustin Pop
    if self.op.ip_check:
3758 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
3759 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3760 7b3a8fb5 Iustin Pop
                                   (self.check_ip, self.op.instance_name))
3761 901a65c1 Iustin Pop
3762 538475ca Iustin Pop
    #### allocator run
3763 538475ca Iustin Pop
3764 538475ca Iustin Pop
    if self.op.iallocator is not None:
3765 538475ca Iustin Pop
      self._RunAllocator()
3766 0f1a06e3 Manuel Franceschini
3767 901a65c1 Iustin Pop
    #### node related checks
3768 901a65c1 Iustin Pop
3769 901a65c1 Iustin Pop
    # check primary node
3770 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
3771 7baf741d Guido Trotter
    assert self.pnode is not None, \
3772 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
3773 901a65c1 Iustin Pop
    self.secondaries = []
3774 901a65c1 Iustin Pop
3775 901a65c1 Iustin Pop
    # mirror node verification
3776 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3777 7baf741d Guido Trotter
      if self.op.snode is None:
3778 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3779 3ecf6786 Iustin Pop
                                   " a mirror node")
3780 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
3781 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3782 3ecf6786 Iustin Pop
                                   " the primary node.")
3783 7baf741d Guido Trotter
      self.secondaries.append(self.op.snode)
3784 a8083063 Iustin Pop
3785 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
3786 6785674e Iustin Pop
3787 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3788 08db7c5c Iustin Pop
                                self.disks)
3789 ed1ebc60 Guido Trotter
3790 8d75db10 Iustin Pop
    # Check lv size requirements
3791 8d75db10 Iustin Pop
    if req_size is not None:
3792 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
3793 72737a7f Iustin Pop
                                         self.op.hypervisor)
3794 8d75db10 Iustin Pop
      for node in nodenames:
3795 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3796 8d75db10 Iustin Pop
        if not info:
3797 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3798 3e91897b Iustin Pop
                                     " from node '%s'" % node)
3799 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3800 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3801 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3802 8d75db10 Iustin Pop
                                     " node %s" % node)
3803 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
3804 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3805 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3806 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3807 ed1ebc60 Guido Trotter
3808 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
3809 6785674e Iustin Pop
3810 a8083063 Iustin Pop
    # os verification
3811 72737a7f Iustin Pop
    os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
3812 dfa96ded Guido Trotter
    if not os_obj:
3813 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3814 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3815 a8083063 Iustin Pop
3816 901a65c1 Iustin Pop
    # bridge check on primary node
3817 08db7c5c Iustin Pop
    bridges = [n.bridge for n in self.nics]
3818 08db7c5c Iustin Pop
    if not self.rpc.call_bridges_exist(self.pnode.name, bridges):
3819 08db7c5c Iustin Pop
      raise errors.OpPrereqError("one of the target bridges '%s' does not"
3820 08db7c5c Iustin Pop
                                 " exist on"
3821 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3822 08db7c5c Iustin Pop
                                 (",".join(bridges), pnode.name))
3823 a8083063 Iustin Pop
3824 49ce1563 Iustin Pop
    # memory check on primary node
3825 49ce1563 Iustin Pop
    if self.op.start:
3826 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
3827 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3828 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
3829 338e51e8 Iustin Pop
                           self.op.hypervisor)
3830 49ce1563 Iustin Pop
3831 a8083063 Iustin Pop
    if self.op.start:
3832 a8083063 Iustin Pop
      self.instance_status = 'up'
3833 a8083063 Iustin Pop
    else:
3834 a8083063 Iustin Pop
      self.instance_status = 'down'
3835 a8083063 Iustin Pop
3836 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3837 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
3838 a8083063 Iustin Pop

3839 a8083063 Iustin Pop
    """
3840 a8083063 Iustin Pop
    instance = self.op.instance_name
3841 a8083063 Iustin Pop
    pnode_name = self.pnode.name
3842 a8083063 Iustin Pop
3843 08db7c5c Iustin Pop
    for nic in self.nics:
3844 08db7c5c Iustin Pop
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
3845 08db7c5c Iustin Pop
        nic.mac = self.cfg.GenerateMAC()
3846 a8083063 Iustin Pop
3847 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
3848 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
3849 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
3850 2a6469d5 Alexander Schreiber
    else:
3851 2a6469d5 Alexander Schreiber
      network_port = None
3852 58acb49d Alexander Schreiber
3853 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
3854 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3855 31a853d2 Iustin Pop
3856 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
3857 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
3858 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
3859 2c313123 Manuel Franceschini
    else:
3860 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
3861 2c313123 Manuel Franceschini
3862 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
3863 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
3864 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
3865 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
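    # illustratively, with the cluster-level base directory this yields
    # something like:
    #   <cluster file storage dir>/<file_storage_dir option>/<instance name>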
3866 0f1a06e3 Manuel Franceschini
3867 0f1a06e3 Manuel Franceschini
3868 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
3869 a8083063 Iustin Pop
                                  self.op.disk_template,
3870 a8083063 Iustin Pop
                                  instance, pnode_name,
3871 08db7c5c Iustin Pop
                                  self.secondaries,
3872 08db7c5c Iustin Pop
                                  self.disks,
3873 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
3874 e2a65344 Iustin Pop
                                  self.op.file_driver,
3875 e2a65344 Iustin Pop
                                  0)
3876 a8083063 Iustin Pop
3877 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3878 a8083063 Iustin Pop
                            primary_node=pnode_name,
3879 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
3880 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
3881 a8083063 Iustin Pop
                            status=self.instance_status,
3882 58acb49d Alexander Schreiber
                            network_port=network_port,
3883 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
3884 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
3885 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
3886 a8083063 Iustin Pop
                            )
3887 a8083063 Iustin Pop
3888 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
3889 b9bddb6b Iustin Pop
    if not _CreateDisks(self, iobj):
3890 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
3891 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance)
3892 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
3893 a8083063 Iustin Pop
3894 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
3895 a8083063 Iustin Pop
3896 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3897 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
3898 7baf741d Guido Trotter
    # added the instance to the config
3899 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
3900 a1578d63 Iustin Pop
    # Remove the temp. assignments for the instance's drbds
3901 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance)
3902 e36e96b4 Guido Trotter
    # Unlock all the nodes
3903 e36e96b4 Guido Trotter
    self.context.glm.release(locking.LEVEL_NODE)
3904 e36e96b4 Guido Trotter
    del self.acquired_locks[locking.LEVEL_NODE]
3905 a8083063 Iustin Pop
3906 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3907 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
3908 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3909 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3910 a8083063 Iustin Pop
      time.sleep(15)
3911 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3912 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
3913 a8083063 Iustin Pop
    else:
3914 a8083063 Iustin Pop
      disk_abort = False
3915 a8083063 Iustin Pop
3916 a8083063 Iustin Pop
    if disk_abort:
3917 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
3918 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3919 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
3920 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
3921 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3922 3ecf6786 Iustin Pop
                               " this instance")
3923 a8083063 Iustin Pop
3924 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3925 a8083063 Iustin Pop
                (instance, pnode_name))
3926 a8083063 Iustin Pop
3927 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3928 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3929 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3930 d15a9ad3 Guido Trotter
        if not self.rpc.call_instance_os_add(pnode_name, iobj):
3931 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3932 3ecf6786 Iustin Pop
                                   " on node %s" %
3933 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3934 a8083063 Iustin Pop
3935 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3936 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3937 a8083063 Iustin Pop
        src_node = self.op.src_node
3938 09acf207 Guido Trotter
        src_images = self.src_images
3939 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
3940 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
3941 09acf207 Guido Trotter
                                                         src_node, src_images,
3942 6c0af70e Guido Trotter
                                                         cluster_name)
3943 09acf207 Guido Trotter
        for idx, result in enumerate(import_result):
3944 09acf207 Guido Trotter
          if not result:
3945 726d7d68 Iustin Pop
            self.LogWarning("Could not import the image %s for instance"
3946 726d7d68 Iustin Pop
                            " %s, disk %d, on node %s" %
3947 726d7d68 Iustin Pop
                            (src_images[idx], instance, idx, pnode_name))
3948 a8083063 Iustin Pop
      else:
3949 a8083063 Iustin Pop
        # also checked in the prereq part
3950 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3951 3ecf6786 Iustin Pop
                                     % self.op.mode)
3952 a8083063 Iustin Pop
3953 a8083063 Iustin Pop
    if self.op.start:
3954 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
3955 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3956 72737a7f Iustin Pop
      if not self.rpc.call_instance_start(pnode_name, iobj, None):
3957 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3958 a8083063 Iustin Pop
3959 a8083063 Iustin Pop
3960 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3961 a8083063 Iustin Pop
  """Connect to an instance's console.
3962 a8083063 Iustin Pop

3963 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3964 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3965 a8083063 Iustin Pop
  console.
3966 a8083063 Iustin Pop

3967 a8083063 Iustin Pop
  """
3968 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3969 8659b73e Guido Trotter
  REQ_BGL = False
3970 8659b73e Guido Trotter
3971 8659b73e Guido Trotter
  def ExpandNames(self):
3972 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
3973 a8083063 Iustin Pop
3974 a8083063 Iustin Pop
  def CheckPrereq(self):
3975 a8083063 Iustin Pop
    """Check prerequisites.
3976 a8083063 Iustin Pop

3977 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3978 a8083063 Iustin Pop

3979 a8083063 Iustin Pop
    """
3980 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3981 8659b73e Guido Trotter
    assert self.instance is not None, \
3982 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3983 a8083063 Iustin Pop
3984 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3985 a8083063 Iustin Pop
    """Connect to the console of an instance
3986 a8083063 Iustin Pop

3987 a8083063 Iustin Pop
    """
3988 a8083063 Iustin Pop
    instance = self.instance
3989 a8083063 Iustin Pop
    node = instance.primary_node
3990 a8083063 Iustin Pop
3991 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
3992 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
3993 a8083063 Iustin Pop
    if node_insts is False:
3994 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3995 a8083063 Iustin Pop
3996 a8083063 Iustin Pop
    if instance.name not in node_insts:
3997 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3998 a8083063 Iustin Pop
3999 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
4000 a8083063 Iustin Pop
4001 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
4002 30989e69 Alexander Schreiber
    console_cmd = hyper.GetShellCommandForConsole(instance)
4003 b047857b Michael Hanselmann
4004 82122173 Iustin Pop
    # build ssh cmdline
4005 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
4006 a8083063 Iustin Pop
4007 a8083063 Iustin Pop
4008 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
4009 a8083063 Iustin Pop
  """Replace the disks of an instance.
4010 a8083063 Iustin Pop

4011 a8083063 Iustin Pop
  """
4012 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
4013 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4014 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
4015 efd990e4 Guido Trotter
  REQ_BGL = False
4016 efd990e4 Guido Trotter
4017 efd990e4 Guido Trotter
  def ExpandNames(self):
4018 efd990e4 Guido Trotter
    self._ExpandAndLockInstance()
4019 efd990e4 Guido Trotter
4020 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
4021 efd990e4 Guido Trotter
      self.op.remote_node = None
4022 efd990e4 Guido Trotter
4023 efd990e4 Guido Trotter
    ia_name = getattr(self.op, "iallocator", None)
4024 efd990e4 Guido Trotter
    if ia_name is not None:
4025 efd990e4 Guido Trotter
      if self.op.remote_node is not None:
4026 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Give either the iallocator or the new"
4027 efd990e4 Guido Trotter
                                   " secondary, not both")
4028 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4029 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
4030 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
4031 efd990e4 Guido Trotter
      if remote_node is None:
4032 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
4033 efd990e4 Guido Trotter
                                   self.op.remote_node)
4034 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
4035 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
4036 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4037 efd990e4 Guido Trotter
    else:
4038 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
4039 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4040 efd990e4 Guido Trotter
4041 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
4042 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
4043 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
4044 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
4045 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
4046 efd990e4 Guido Trotter
      self._LockInstancesNodes()
4047 a8083063 Iustin Pop
4048 b6e82a65 Iustin Pop
  def _RunAllocator(self):
4049 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
4050 b6e82a65 Iustin Pop

4051 b6e82a65 Iustin Pop
    """
4052 72737a7f Iustin Pop
    ial = IAllocator(self,
4053 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
4054 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
4055 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
4056 b6e82a65 Iustin Pop
4057 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
4058 b6e82a65 Iustin Pop
4059 b6e82a65 Iustin Pop
    if not ial.success:
4060 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
4061 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
4062 b6e82a65 Iustin Pop
                                                           ial.info))
4063 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
4064 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4065 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
4066 b6e82a65 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
4067 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
4068 86d9d3bb Iustin Pop
    self.LogInfo("Selected new secondary for the instance: %s",
4069 86d9d3bb Iustin Pop
                 self.op.remote_node)
4070 b6e82a65 Iustin Pop
4071 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4072 a8083063 Iustin Pop
    """Build hooks env.
4073 a8083063 Iustin Pop

4074 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
4075 a8083063 Iustin Pop

4076 a8083063 Iustin Pop
    """
4077 a8083063 Iustin Pop
    env = {
4078 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
4079 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
4080 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
4081 a8083063 Iustin Pop
      }
4082 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4083 0834c866 Iustin Pop
    nl = [
4084 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
4085 0834c866 Iustin Pop
      self.instance.primary_node,
4086 0834c866 Iustin Pop
      ]
4087 0834c866 Iustin Pop
    if self.op.remote_node is not None:
4088 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
4089 a8083063 Iustin Pop
    return env, nl, nl
4090 a8083063 Iustin Pop
4091 a8083063 Iustin Pop
  def CheckPrereq(self):
4092 a8083063 Iustin Pop
    """Check prerequisites.
4093 a8083063 Iustin Pop

4094 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4095 a8083063 Iustin Pop

4096 a8083063 Iustin Pop
    """
4097 efd990e4 Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4098 efd990e4 Guido Trotter
    assert instance is not None, \
4099 efd990e4 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4100 a8083063 Iustin Pop
    self.instance = instance
4101 a8083063 Iustin Pop
4102 a9e0c397 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
4103 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
4104 a9e0c397 Iustin Pop
                                 " network mirrored.")
4105 a8083063 Iustin Pop
4106 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
4107 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
4108 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
4109 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
4110 a8083063 Iustin Pop
4111 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
4112 a9e0c397 Iustin Pop
4113 b6e82a65 Iustin Pop
    ia_name = getattr(self.op, "iallocator", None)
4114 b6e82a65 Iustin Pop
    if ia_name is not None:
4115 de8c7666 Guido Trotter
      self._RunAllocator()
4116 b6e82a65 Iustin Pop
4117 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
4118 a9e0c397 Iustin Pop
    if remote_node is not None:
4119 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
4120 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
4121 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
4122 a9e0c397 Iustin Pop
    else:
4123 a9e0c397 Iustin Pop
      self.remote_node_info = None
4124 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
4125 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
4126 3ecf6786 Iustin Pop
                                 " the instance.")
4127 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
4128 0834c866 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_SEC:
4129 0834c866 Iustin Pop
        # this is for DRBD8, where we can't execute the same mode of
4130 0834c866 Iustin Pop
        # replacement as for drbd7 (no different port allocated)
4131 0834c866 Iustin Pop
        raise errors.OpPrereqError("Same secondary given, cannot execute"
4132 0834c866 Iustin Pop
                                   " replacement")
4133 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
4134 7df43a76 Iustin Pop
      if (self.op.mode == constants.REPLACE_DISK_ALL and
4135 7df43a76 Iustin Pop
          remote_node is not None):
4136 7df43a76 Iustin Pop
        # switch to replace secondary mode
4137 7df43a76 Iustin Pop
        self.op.mode = constants.REPLACE_DISK_SEC
4138 7df43a76 Iustin Pop
4139 a9e0c397 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_ALL:
4140 12c3449a Michael Hanselmann
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
4141 a9e0c397 Iustin Pop
                                   " secondary disk replacement, not"
4142 a9e0c397 Iustin Pop
                                   " both at once")
4143 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_PRI:
4144 a9e0c397 Iustin Pop
        if remote_node is not None:
4145 12c3449a Michael Hanselmann
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
4146 a9e0c397 Iustin Pop
                                     " the secondary while doing a primary"
4147 a9e0c397 Iustin Pop
                                     " node disk replacement")
4148 a9e0c397 Iustin Pop
        self.tgt_node = instance.primary_node
4149 cff90b79 Iustin Pop
        self.oth_node = instance.secondary_nodes[0]
4150 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_SEC:
4151 a9e0c397 Iustin Pop
        self.new_node = remote_node # this can be None, in which case
4152 a9e0c397 Iustin Pop
                                    # we don't change the secondary
4153 a9e0c397 Iustin Pop
        self.tgt_node = instance.secondary_nodes[0]
4154 cff90b79 Iustin Pop
        self.oth_node = instance.primary_node
4155 a9e0c397 Iustin Pop
      else:
4156 a9e0c397 Iustin Pop
        raise errors.ProgrammerError("Unhandled disk replace mode")
4157 a9e0c397 Iustin Pop
4158 54155f52 Iustin Pop
    if not self.op.disks:
4159 54155f52 Iustin Pop
      self.op.disks = range(len(instance.disks))
4160 54155f52 Iustin Pop
4161 54155f52 Iustin Pop
    for disk_idx in self.op.disks:
4162 3e0cea06 Iustin Pop
      instance.FindDisk(disk_idx)
4163 a8083063 Iustin Pop
4164 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
4165 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
4166 a9e0c397 Iustin Pop

4167 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4168 e4376078 Iustin Pop

4169 e4376078 Iustin Pop
      1. for each disk to be replaced:
4170 e4376078 Iustin Pop

4171 e4376078 Iustin Pop
        1. create new LVs on the target node with unique names
4172 e4376078 Iustin Pop
        1. detach old LVs from the drbd device
4173 e4376078 Iustin Pop
        1. rename old LVs to name_replaced.<time_t>
4174 e4376078 Iustin Pop
        1. rename new LVs to old LVs
4175 e4376078 Iustin Pop
        1. attach the new LVs (with the old names now) to the drbd device
4176 e4376078 Iustin Pop

4177 e4376078 Iustin Pop
      1. wait for sync across all devices
4178 e4376078 Iustin Pop

4179 e4376078 Iustin Pop
      1. for each modified disk:
4180 e4376078 Iustin Pop

4181 e4376078 Iustin Pop
        1. remove old LVs (which have the name name_replaced.<time_t>)
4182 a9e0c397 Iustin Pop

4183 a9e0c397 Iustin Pop
    Failures are not very well handled.
4184 cff90b79 Iustin Pop

4185 a9e0c397 Iustin Pop
    """
4186 cff90b79 Iustin Pop
    steps_total = 6
4187 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4188 a9e0c397 Iustin Pop
    instance = self.instance
4189 a9e0c397 Iustin Pop
    iv_names = {}
4190 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4191 a9e0c397 Iustin Pop
    # start of work
4192 a9e0c397 Iustin Pop
    cfg = self.cfg
4193 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
4194 cff90b79 Iustin Pop
    oth_node = self.oth_node
4195 cff90b79 Iustin Pop
4196 cff90b79 Iustin Pop
    # Step: check device activation
4197 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4198 cff90b79 Iustin Pop
    info("checking volume groups")
4199 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
4200 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([oth_node, tgt_node])
4201 cff90b79 Iustin Pop
    if not results:
4202 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4203 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
4204 cff90b79 Iustin Pop
      res = results.get(node, False)
4205 cff90b79 Iustin Pop
      if not res or my_vg not in res:
4206 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4207 cff90b79 Iustin Pop
                                 (my_vg, node))
4208 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
4209 54155f52 Iustin Pop
      if idx not in self.op.disks:
4210 cff90b79 Iustin Pop
        continue
4211 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
4212 54155f52 Iustin Pop
        info("checking disk/%d on %s" % (idx, node))
4213 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
4214 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_find(node, dev):
4215 54155f52 Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s" %
4216 54155f52 Iustin Pop
                                   (idx, node))
4217 cff90b79 Iustin Pop
4218 cff90b79 Iustin Pop
    # Step: check other node consistency
4219 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4220 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
4221 54155f52 Iustin Pop
      if idx not in self.op.disks:
4222 cff90b79 Iustin Pop
        continue
4223 54155f52 Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, oth_node))
4224 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, oth_node,
4225 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
4226 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
4227 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
4228 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
4229 cff90b79 Iustin Pop
4230 cff90b79 Iustin Pop
    # Step: create new storage
4231 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4232 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
4233 54155f52 Iustin Pop
      if idx not in self.op.disks:
4234 a9e0c397 Iustin Pop
        continue
4235 a9e0c397 Iustin Pop
      size = dev.size
4236 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
4237 54155f52 Iustin Pop
      lv_names = [".disk%d_%s" % (idx, suf)
4238 54155f52 Iustin Pop
                  for suf in ["data", "meta"]]
4239 b9bddb6b Iustin Pop
      names = _GenerateUniqueNames(self, lv_names)
4240 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
4241 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
4242 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
4243 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
4244 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
4245 a9e0c397 Iustin Pop
      old_lvs = dev.children
4246 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
4247 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
4248 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
4249 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4250 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4251 a9e0c397 Iustin Pop
      # are talking about the secondary node
4252 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
4253 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
4254 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4255 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4256 a9e0c397 Iustin Pop
                                   " node '%s'" %
4257 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
4258 a9e0c397 Iustin Pop
4259 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
4260 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
4261 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
4262 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
4263 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
4264 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
4265 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
4266 cff90b79 Iustin Pop
      #dev.children = []
4267 cff90b79 Iustin Pop
      #cfg.Update(instance)
4268 a9e0c397 Iustin Pop
4269 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
4270 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
4271 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
4272 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
4273 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
4274 cff90b79 Iustin Pop
4275 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
4276 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
4277 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
4278 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
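      # illustratively (with a hypothetical LV name), ren_fn(lv, 1234567890)
      # would yield ('xenvg', '<uuid>.disk0_data_replaced-1234567890')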
4279 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
4280 cff90b79 Iustin Pop
      rlist = []
4281 cff90b79 Iustin Pop
      for to_ren in old_lvs:
4282 72737a7f Iustin Pop
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
4283 cff90b79 Iustin Pop
        if find_res is not None: # device exists
4284 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
4285 cff90b79 Iustin Pop
4286 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
4287 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4288 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
4289 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
4290 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
4291 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
4292 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4293 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
4294 cff90b79 Iustin Pop
4295 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
4296 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
4297 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
4298 a9e0c397 Iustin Pop
4299 cff90b79 Iustin Pop
      for disk in old_lvs:
4300 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
4301 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
4302 a9e0c397 Iustin Pop
4303 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
4304 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
4305 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
4306 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
4307 72737a7f Iustin Pop
          if not self.rpc.call_blockdev_remove(tgt_node, new_lv):
4308 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
4309 cff90b79 Iustin Pop
                    " logical volumes")
4310 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
4311 a9e0c397 Iustin Pop
4312 a9e0c397 Iustin Pop
      dev.children = new_lvs
4313 a9e0c397 Iustin Pop
      cfg.Update(instance)
4314 a9e0c397 Iustin Pop
4315 cff90b79 Iustin Pop
    # Step: wait for sync
4316 a9e0c397 Iustin Pop
4317 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4318 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4319 a9e0c397 Iustin Pop
    # return value
4320 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4321 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4322 a9e0c397 Iustin Pop
4323 a9e0c397 Iustin Pop
    # so check manually all the devices
4324 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4325 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
4326 72737a7f Iustin Pop
      is_degr = self.rpc.call_blockdev_find(instance.primary_node, dev)[5]
4327 a9e0c397 Iustin Pop
      if is_degr:
4328 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4329 a9e0c397 Iustin Pop
4330 cff90b79 Iustin Pop
    # Step: remove old storage
4331 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4332 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4333 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
4334 a9e0c397 Iustin Pop
      for lv in old_lvs:
4335 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
4336 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_remove(tgt_node, lv):
4337 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
4338 a9e0c397 Iustin Pop
          continue
4339 a9e0c397 Iustin Pop
4340 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
4341 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
4342 a9e0c397 Iustin Pop

4343 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4344 a9e0c397 Iustin Pop
      - for all disks of the instance:
4345 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
4346 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
4347 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
4348 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
4349 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
4350 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
4351 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
4352 a9e0c397 Iustin Pop
          not network enabled
4353 a9e0c397 Iustin Pop
      - wait for sync across all devices
4354 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
4355 a9e0c397 Iustin Pop

4356 a9e0c397 Iustin Pop
    Failures are not very well handled.
4357 0834c866 Iustin Pop

4358 a9e0c397 Iustin Pop
    """
4359 0834c866 Iustin Pop
    steps_total = 6
4360 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4361 a9e0c397 Iustin Pop
    instance = self.instance
4362 a9e0c397 Iustin Pop
    iv_names = {}
4363 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4364 a9e0c397 Iustin Pop
    # start of work
4365 a9e0c397 Iustin Pop
    cfg = self.cfg
4366 a9e0c397 Iustin Pop
    old_node = self.tgt_node
4367 a9e0c397 Iustin Pop
    new_node = self.new_node
4368 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
4369 0834c866 Iustin Pop
4370 0834c866 Iustin Pop
    # Step: check device activation
4371 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4372 0834c866 Iustin Pop
    info("checking volume groups")
4373 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
4374 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
4375 0834c866 Iustin Pop
    if not results:
4376 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4377 0834c866 Iustin Pop
    for node in pri_node, new_node:
4378 0834c866 Iustin Pop
      res = results.get(node, False)
4379 0834c866 Iustin Pop
      if not res or my_vg not in res:
4380 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4381 0834c866 Iustin Pop
                                 (my_vg, node))
4382 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4383 d418ebfb Iustin Pop
      if idx not in self.op.disks:
4384 0834c866 Iustin Pop
        continue
4385 d418ebfb Iustin Pop
      info("checking disk/%d on %s" % (idx, pri_node))
4386 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4387 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4388 d418ebfb Iustin Pop
        raise errors.OpExecError("Can't find disk/%d on node %s" %
4389 d418ebfb Iustin Pop
                                 (idx, pri_node))
4390 0834c866 Iustin Pop
4391 0834c866 Iustin Pop
    # Step: check other node consistency
4392 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4393 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4394 d418ebfb Iustin Pop
      if idx not in self.op.disks:
4395 0834c866 Iustin Pop
        continue
4396 d418ebfb Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, pri_node))
4397 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4398 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4399 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4400 0834c866 Iustin Pop
                                 pri_node)
4401 0834c866 Iustin Pop
4402 0834c866 Iustin Pop
    # Step: create new storage
4403 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4404 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4405 a9e0c397 Iustin Pop
      size = dev.size
4406 d418ebfb Iustin Pop
      info("adding new local storage on %s for disk/%d" %
4407 d418ebfb Iustin Pop
           (new_node, idx))
4408 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4409 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4410 a9e0c397 Iustin Pop
      # are talking about the secondary node
4411 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4412 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
4413 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4414 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4415 a9e0c397 Iustin Pop
                                   " node '%s'" %
4416 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4417 a9e0c397 Iustin Pop
4418 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
4419 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
4420 a1578d63 Iustin Pop
    # error and the success paths
4421 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4422 a1578d63 Iustin Pop
                                   instance.name)
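    # minors should hold one free drbd minor on new_node per instance disk;
    # they are consumed positionally by the zip() with instance.disks below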
4423 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
4424 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4425 d418ebfb Iustin Pop
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
4426 0834c866 Iustin Pop
      size = dev.size
4427 d418ebfb Iustin Pop
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
4428 a9e0c397 Iustin Pop
      # create new devices on new_node
4429 ffa1c0dc Iustin Pop
      if pri_node == dev.logical_id[0]:
4430 ffa1c0dc Iustin Pop
        new_logical_id = (pri_node, new_node,
4431 f9518d38 Iustin Pop
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4432 f9518d38 Iustin Pop
                          dev.logical_id[5])
4433 ffa1c0dc Iustin Pop
      else:
4434 ffa1c0dc Iustin Pop
        new_logical_id = (new_node, pri_node,
4435 f9518d38 Iustin Pop
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4436 f9518d38 Iustin Pop
                          dev.logical_id[5])
4437 d418ebfb Iustin Pop
      iv_names[idx] = (dev, dev.children, new_logical_id)
4438 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4439 a1578d63 Iustin Pop
                    new_logical_id)
4440 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4441 ffa1c0dc Iustin Pop
                              logical_id=new_logical_id,
4442 a9e0c397 Iustin Pop
                              children=dev.children)
4443 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(self, new_node, instance,
4444 3f78eef2 Iustin Pop
                                        new_drbd, False,
4445 b9bddb6b Iustin Pop
                                        _GetInstanceInfoText(instance)):
4446 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4447 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4448 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4449 a9e0c397 Iustin Pop
4450 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4451 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4452 d418ebfb Iustin Pop
      info("shutting down drbd for disk/%d on old node" % idx)
4453 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4454 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_shutdown(old_node, dev):
4455 d418ebfb Iustin Pop
        warning("Failed to shutdown drbd for disk/%d on old node" % idx,
4456 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4457 a9e0c397 Iustin Pop
4458 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4459 642445d9 Iustin Pop
    done = 0
4460 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4461 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4462 f9518d38 Iustin Pop
      # set the network part of the physical (unique in bdev terms) id
4463 f9518d38 Iustin Pop
      # to None, meaning detach from network
4464 f9518d38 Iustin Pop
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
4465 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4466 642445d9 Iustin Pop
      # standalone state
4467 72737a7f Iustin Pop
      if self.rpc.call_blockdev_find(pri_node, dev):
4468 642445d9 Iustin Pop
        done += 1
4469 642445d9 Iustin Pop
      else:
4470 d418ebfb Iustin Pop
        warning("Failed to detach drbd disk/%d from network, unusual case" %
4471 d418ebfb Iustin Pop
                idx)
4472 642445d9 Iustin Pop
4473 642445d9 Iustin Pop
    if not done:
4474 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4475 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
4476 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4477 642445d9 Iustin Pop
4478 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4479 642445d9 Iustin Pop
    # the instance to point to the new secondary
4480 642445d9 Iustin Pop
    info("updating instance configuration")
4481 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
4482 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
4483 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4484 642445d9 Iustin Pop
    cfg.Update(instance)
4485 a1578d63 Iustin Pop
    # we can now remove the temp minors, as the new values are
4486 a1578d63 Iustin Pop
    # written to the config file (and therefore stable)
4487 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance.name)
4488 a9e0c397 Iustin Pop
4489 642445d9 Iustin Pop
    # and now perform the drbd attach
4490 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4491 642445d9 Iustin Pop
    failures = []
4492 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4493 d418ebfb Iustin Pop
      info("attaching primary drbd for disk/%d to new secondary node" % idx)
4494 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4495 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4496 642445d9 Iustin Pop
      # is correct
4497 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4498 ffa1c0dc Iustin Pop
      logging.debug("Disk to attach: %s", dev)
4499 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4500 d418ebfb Iustin Pop
        warning("can't attach drbd disk/%d to new secondary!" % idx,
4501 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4502 a9e0c397 Iustin Pop
4503 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4504 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4505 a9e0c397 Iustin Pop
    # return value
4506 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4507 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4508 a9e0c397 Iustin Pop
4509 a9e0c397 Iustin Pop
    # so check manually all the devices
4510 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
4511 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4512 72737a7f Iustin Pop
      is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5]
4513 a9e0c397 Iustin Pop
      if is_degr:
4514 d418ebfb Iustin Pop
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
4515 a9e0c397 Iustin Pop
4516 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4517 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
4518 d418ebfb Iustin Pop
      info("remove logical volumes for disk/%d" % idx)
4519 a9e0c397 Iustin Pop
      for lv in old_lvs:
4520 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4521 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_remove(old_node, lv):
4522 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4523 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4524 a9e0c397 Iustin Pop
4525 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
4526 a9e0c397 Iustin Pop
    """Execute disk replacement.
4527 a9e0c397 Iustin Pop

4528 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
4529 a9e0c397 Iustin Pop

4530 a9e0c397 Iustin Pop
    """
4531 a9e0c397 Iustin Pop
    instance = self.instance
4532 22985314 Guido Trotter
4533 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
4534 22985314 Guido Trotter
    if instance.status == "down":
4535 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, True)
4536 22985314 Guido Trotter
4537 abdf0113 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
4538 a9e0c397 Iustin Pop
      if self.op.remote_node is None:
4539 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
4540 a9e0c397 Iustin Pop
      else:
4541 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
4542 a9e0c397 Iustin Pop
    else:
4543 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
4544 22985314 Guido Trotter
4545 22985314 Guido Trotter
    ret = fn(feedback_fn)
4546 22985314 Guido Trotter
4547 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
4548 22985314 Guido Trotter
    if instance.status == "down":
4549 b9bddb6b Iustin Pop
      _SafeShutdownInstanceDisks(self, instance)
4550 22985314 Guido Trotter
4551 22985314 Guido Trotter
    return ret
4552 a9e0c397 Iustin Pop
4553 a8083063 Iustin Pop
4554 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
4555 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
4556 8729e0d7 Iustin Pop

4557 8729e0d7 Iustin Pop
  """
4558 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
4559 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4560 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
4561 31e63dbf Guido Trotter
  REQ_BGL = False
4562 31e63dbf Guido Trotter
4563 31e63dbf Guido Trotter
  def ExpandNames(self):
4564 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
4565 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4566 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4567 31e63dbf Guido Trotter
4568 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
4569 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
4570 31e63dbf Guido Trotter
      self._LockInstancesNodes()
4571 8729e0d7 Iustin Pop
4572 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
4573 8729e0d7 Iustin Pop
    """Build hooks env.
4574 8729e0d7 Iustin Pop

4575 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
4576 8729e0d7 Iustin Pop

4577 8729e0d7 Iustin Pop
    """
4578 8729e0d7 Iustin Pop
    env = {
4579 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
4580 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
4581 8729e0d7 Iustin Pop
      }
4582 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4583 8729e0d7 Iustin Pop
    nl = [
4584 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
4585 8729e0d7 Iustin Pop
      self.instance.primary_node,
4586 8729e0d7 Iustin Pop
      ]
4587 8729e0d7 Iustin Pop
    return env, nl, nl
4588 8729e0d7 Iustin Pop
4589 8729e0d7 Iustin Pop
  def CheckPrereq(self):
4590 8729e0d7 Iustin Pop
    """Check prerequisites.
4591 8729e0d7 Iustin Pop

4592 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
4593 8729e0d7 Iustin Pop

4594 8729e0d7 Iustin Pop
    """
4595 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4596 31e63dbf Guido Trotter
    assert instance is not None, \
4597 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4598 31e63dbf Guido Trotter
4599 8729e0d7 Iustin Pop
    self.instance = instance
4600 8729e0d7 Iustin Pop
4601 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
4602 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
4603 8729e0d7 Iustin Pop
                                 " growing.")
4604 8729e0d7 Iustin Pop
4605 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
4606 8729e0d7 Iustin Pop
4607 8729e0d7 Iustin Pop
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
4608 72737a7f Iustin Pop
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4609 72737a7f Iustin Pop
                                       instance.hypervisor)
4610 8729e0d7 Iustin Pop
    for node in nodenames:
4611 8729e0d7 Iustin Pop
      info = nodeinfo.get(node, None)
4612 8729e0d7 Iustin Pop
      if not info:
4613 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
4614 8729e0d7 Iustin Pop
                                   " from node '%s'" % node)
4615 8729e0d7 Iustin Pop
      vg_free = info.get('vg_free', None)
4616 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
4617 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
4618 8729e0d7 Iustin Pop
                                   " node %s" % node)
4619 8729e0d7 Iustin Pop
      if self.op.amount > info['vg_free']:
4620 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
4621 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
4622 8729e0d7 Iustin Pop
                                   (node, info['vg_free'], self.op.amount))
4623 8729e0d7 Iustin Pop
4624 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
4625 8729e0d7 Iustin Pop
    """Execute disk grow.
4626 8729e0d7 Iustin Pop

4627 8729e0d7 Iustin Pop
    """
4628 8729e0d7 Iustin Pop
    instance = self.instance
4629 ad24e046 Iustin Pop
    disk = self.disk
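    # Rough outline of what follows (descriptive comment only): the grow RPC
    # is sent to every node holding the disk (secondaries first, then the
    # primary); each reply is expected to be a two-element (status, message)
    # sequence, e.g. (True, '') or (False, 'lvextend failed') -- made-up
    # payloads, only the shape is checked.  Only once all nodes succeed is
    # the new size recorded in the configuration and the optional sync wait
    # started (which mainly matters for DRBD-based disks).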
4630 8729e0d7 Iustin Pop
    for node in (instance.secondary_nodes + (instance.primary_node,)):
4631 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
4632 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
4633 72737a7f Iustin Pop
      if (not result or not isinstance(result, (list, tuple)) or
4634 72737a7f Iustin Pop
          len(result) != 2):
4635 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s" % node)
4636 8729e0d7 Iustin Pop
      elif not result[0]:
4637 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s: %s" %
4638 8729e0d7 Iustin Pop
                                 (node, result[1]))
4639 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
4640 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
4641 6605411d Iustin Pop
    if self.op.wait_for_sync:
4642 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
4643 6605411d Iustin Pop
      if disk_abort:
4644 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
4645 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
4646 8729e0d7 Iustin Pop
4647 8729e0d7 Iustin Pop
4648 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
4649 a8083063 Iustin Pop
  """Query runtime instance data.
4650 a8083063 Iustin Pop

4651 a8083063 Iustin Pop
  """
4652 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
4653 a987fa48 Guido Trotter
  REQ_BGL = False
4654 ae5849b5 Michael Hanselmann
4655 a987fa48 Guido Trotter
  def ExpandNames(self):
4656 a987fa48 Guido Trotter
    self.needed_locks = {}
4657 a987fa48 Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
4658 a987fa48 Guido Trotter
4659 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
4660 a987fa48 Guido Trotter
      raise errors.OpPrereqError("Invalid argument type 'instances'")
4661 a987fa48 Guido Trotter
4662 a987fa48 Guido Trotter
    if self.op.instances:
4663 a987fa48 Guido Trotter
      self.wanted_names = []
4664 a987fa48 Guido Trotter
      for name in self.op.instances:
4665 a987fa48 Guido Trotter
        full_name = self.cfg.ExpandInstanceName(name)
4666 a987fa48 Guido Trotter
        if full_name is None:
4667 a987fa48 Guido Trotter
          raise errors.OpPrereqError("Instance '%s' not known" %
4668 a987fa48 Guido Trotter
                                     name)
4669 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
4670 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
4671 a987fa48 Guido Trotter
    else:
4672 a987fa48 Guido Trotter
      self.wanted_names = None
4673 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
4674 a987fa48 Guido Trotter
4675 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4676 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4677 a987fa48 Guido Trotter
4678 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
4679 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
4680 a987fa48 Guido Trotter
      self._LockInstancesNodes()
4681 a8083063 Iustin Pop
4682 a8083063 Iustin Pop
  def CheckPrereq(self):
4683 a8083063 Iustin Pop
    """Check prerequisites.
4684 a8083063 Iustin Pop

4685 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
4686 a8083063 Iustin Pop

4687 a8083063 Iustin Pop
    """
4688 a987fa48 Guido Trotter
    if self.wanted_names is None:
4689 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4690 a8083063 Iustin Pop
4691 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
4692 a987fa48 Guido Trotter
                             in self.wanted_names]
4693 a987fa48 Guido Trotter
    return
4694 a8083063 Iustin Pop
4695 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
4696 a8083063 Iustin Pop
    """Compute block device status.
4697 a8083063 Iustin Pop

4698 a8083063 Iustin Pop
    """
4699 57821cac Iustin Pop
    static = self.op.static
4700 57821cac Iustin Pop
    if not static:
4701 57821cac Iustin Pop
      self.cfg.SetDiskID(dev, instance.primary_node)
4702 57821cac Iustin Pop
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
4703 57821cac Iustin Pop
    else:
4704 57821cac Iustin Pop
      dev_pstatus = None
4705 57821cac Iustin Pop
4706 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
4707 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
4708 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
4709 a8083063 Iustin Pop
        snode = dev.logical_id[1]
4710 a8083063 Iustin Pop
      else:
4711 a8083063 Iustin Pop
        snode = dev.logical_id[0]
4712 a8083063 Iustin Pop
4713 57821cac Iustin Pop
    if snode and not static:
4714 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
4715 72737a7f Iustin Pop
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
4716 a8083063 Iustin Pop
    else:
4717 a8083063 Iustin Pop
      dev_sstatus = None
4718 a8083063 Iustin Pop
4719 a8083063 Iustin Pop
    if dev.children:
4720 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
4721 a8083063 Iustin Pop
                      for child in dev.children]
4722 a8083063 Iustin Pop
    else:
4723 a8083063 Iustin Pop
      dev_children = []
4724 a8083063 Iustin Pop
4725 a8083063 Iustin Pop
    data = {
4726 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
4727 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
4728 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
4729 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
4730 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
4731 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
4732 a8083063 Iustin Pop
      "children": dev_children,
4733 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
4734 a8083063 Iustin Pop
      }
4735 a8083063 Iustin Pop
4736 a8083063 Iustin Pop
    return data
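    # Illustrative shape of the dict returned above (all values made up):
    #   {"iv_name": "disk/0", "dev_type": "drbd8",
    #    "logical_id": (...), "physical_id": (...),
    #    "pstatus": <blockdev_find result on the primary, or None if static>,
    #    "sstatus": <same for the secondary, or None>,
    #    "mode": "rw",
    #    "children": [<one dict of this same shape per child device>]}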
4737 a8083063 Iustin Pop
4738 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4739 a8083063 Iustin Pop
    """Gather and return data"""
4740 a8083063 Iustin Pop
    result = {}
4741 338e51e8 Iustin Pop
4742 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
4743 338e51e8 Iustin Pop
4744 a8083063 Iustin Pop
    for instance in self.wanted_instances:
4745 57821cac Iustin Pop
      if not self.op.static:
4746 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
4747 57821cac Iustin Pop
                                                  instance.name,
4748 57821cac Iustin Pop
                                                  instance.hypervisor)
4749 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
4750 57821cac Iustin Pop
          remote_state = "up"
4751 57821cac Iustin Pop
        else:
4752 57821cac Iustin Pop
          remote_state = "down"
4753 a8083063 Iustin Pop
      else:
4754 57821cac Iustin Pop
        remote_state = None
4755 a8083063 Iustin Pop
      if instance.status == "down":
4756 a8083063 Iustin Pop
        config_state = "down"
4757 a8083063 Iustin Pop
      else:
4758 a8083063 Iustin Pop
        config_state = "up"
4759 a8083063 Iustin Pop
4760 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
4761 a8083063 Iustin Pop
               for device in instance.disks]
4762 a8083063 Iustin Pop
4763 a8083063 Iustin Pop
      idict = {
4764 a8083063 Iustin Pop
        "name": instance.name,
4765 a8083063 Iustin Pop
        "config_state": config_state,
4766 a8083063 Iustin Pop
        "run_state": remote_state,
4767 a8083063 Iustin Pop
        "pnode": instance.primary_node,
4768 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
4769 a8083063 Iustin Pop
        "os": instance.os,
4770 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4771 a8083063 Iustin Pop
        "disks": disks,
4772 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
4773 24838135 Iustin Pop
        "network_port": instance.network_port,
4774 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
4775 338e51e8 Iustin Pop
        "hv_actual": cluster.FillHV(instance),
4776 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
4777 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
4778 a8083063 Iustin Pop
        }
4779 a8083063 Iustin Pop
4780 a8083063 Iustin Pop
      result[instance.name] = idict
4781 a8083063 Iustin Pop
4782 a8083063 Iustin Pop
    return result
4783 a8083063 Iustin Pop
4784 a8083063 Iustin Pop
4785 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4786 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4787 a8083063 Iustin Pop

4788 a8083063 Iustin Pop
  """
4789 a8083063 Iustin Pop
  HPATH = "instance-modify"
4790 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4791 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
4792 1a5c7281 Guido Trotter
  REQ_BGL = False
4793 1a5c7281 Guido Trotter
4794 24991749 Iustin Pop
  def CheckArguments(self):
4795 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
4796 24991749 Iustin Pop
      self.op.nics = []
4797 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
4798 24991749 Iustin Pop
      self.op.disks = []
4799 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
4800 24991749 Iustin Pop
      self.op.beparams = {}
4801 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
4802 24991749 Iustin Pop
      self.op.hvparams = {}
4803 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
4804 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
4805 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
4806 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4807 24991749 Iustin Pop
4808 24991749 Iustin Pop
    for item in (constants.BE_MEMORY, constants.BE_VCPUS):
4809 24991749 Iustin Pop
      val = self.op.beparams.get(item, None)
4810 24991749 Iustin Pop
      if val is not None:
4811 24991749 Iustin Pop
        try:
4812 24991749 Iustin Pop
          val = int(val)
4813 24991749 Iustin Pop
        except ValueError, err:
4814 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid %s size: %s" % (item, str(err)))
4815 24991749 Iustin Pop
        self.op.beparams[item] = val
4816 24991749 Iustin Pop
    # Disk validation
4817 24991749 Iustin Pop
    disk_addremove = 0
4818 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
4819 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
4820 24991749 Iustin Pop
        disk_addremove += 1
4821 24991749 Iustin Pop
        continue
4822 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
4823 24991749 Iustin Pop
        disk_addremove += 1
4824 24991749 Iustin Pop
      else:
4825 24991749 Iustin Pop
        if not isinstance(disk_op, int):
4826 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
4827 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
4828 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
4829 24991749 Iustin Pop
        if mode not in (constants.DISK_RDONLY, constants.DISK_RDWR):
4830 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
4831 24991749 Iustin Pop
        size = disk_dict.get('size', None)
4832 24991749 Iustin Pop
        if size is None:
4833 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
4834 24991749 Iustin Pop
        try:
4835 24991749 Iustin Pop
          size = int(size)
4836 24991749 Iustin Pop
        except ValueError, err:
4837 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
4838 24991749 Iustin Pop
                                     str(err))
4839 24991749 Iustin Pop
        disk_dict['size'] = size
4840 24991749 Iustin Pop
      else:
4841 24991749 Iustin Pop
        # modification of disk
4842 24991749 Iustin Pop
        if 'size' in disk_dict:
4843 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
4844 24991749 Iustin Pop
                                     " grow-disk")
4845 24991749 Iustin Pop
4846 24991749 Iustin Pop
    if disk_addremove > 1:
4847 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
4848 24991749 Iustin Pop
                                 " supported at a time")
4849 24991749 Iustin Pop
4850 24991749 Iustin Pop
    # NIC validation
4851 24991749 Iustin Pop
    nic_addremove = 0
4852 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
4853 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
4854 24991749 Iustin Pop
        nic_addremove += 1
4855 24991749 Iustin Pop
        continue
4856 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
4857 24991749 Iustin Pop
        nic_addremove += 1
4858 24991749 Iustin Pop
      else:
4859 24991749 Iustin Pop
        if not isinstance(nic_op, int):
4860 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
4861 24991749 Iustin Pop
4862 24991749 Iustin Pop
      # nic_dict should be a dict
4863 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
4864 24991749 Iustin Pop
      if nic_ip is not None:
4865 24991749 Iustin Pop
        if nic_ip.lower() == "none":
4866 24991749 Iustin Pop
          nic_dict['ip'] = None
4867 24991749 Iustin Pop
        else:
4868 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
4869 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
4870 24991749 Iustin Pop
      # we can only check None bridges and assign the default one
4871 24991749 Iustin Pop
      nic_bridge = nic_dict.get('bridge', None)
4872 24991749 Iustin Pop
      if nic_bridge is None:
4873 24991749 Iustin Pop
        nic_dict['bridge'] = self.cfg.GetDefBridge()
4874 24991749 Iustin Pop
      # but we can validate MACs
4875 24991749 Iustin Pop
      nic_mac = nic_dict.get('mac', None)
4876 24991749 Iustin Pop
      if nic_mac is not None:
4877 24991749 Iustin Pop
        if self.cfg.IsMacInUse(nic_mac):
4878 24991749 Iustin Pop
          raise errors.OpPrereqError("MAC address %s already in use"
4879 24991749 Iustin Pop
                                     " in cluster" % nic_mac)
4880 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4881 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
4882 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
4883 24991749 Iustin Pop
    if nic_addremove > 1:
4884 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
4885 24991749 Iustin Pop
                                 " supported at a time")
4886 24991749 Iustin Pop
4887 1a5c7281 Guido Trotter
  def ExpandNames(self):
4888 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
4889 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
4890 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4891 74409b12 Iustin Pop
4892 74409b12 Iustin Pop
  def DeclareLocks(self, level):
4893 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
4894 74409b12 Iustin Pop
      self._LockInstancesNodes()
4895 a8083063 Iustin Pop
4896 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4897 a8083063 Iustin Pop
    """Build hooks env.
4898 a8083063 Iustin Pop

4899 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4900 a8083063 Iustin Pop

4901 a8083063 Iustin Pop
    """
4902 396e1b78 Michael Hanselmann
    args = dict()
4903 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
4904 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
4905 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
4906 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
4907 24991749 Iustin Pop
    # FIXME: readd disk/nic changes
4908 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
4909 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(),
4910 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4911 a8083063 Iustin Pop
    return env, nl, nl
4912 a8083063 Iustin Pop
4913 a8083063 Iustin Pop
  def CheckPrereq(self):
4914 a8083063 Iustin Pop
    """Check prerequisites.
4915 a8083063 Iustin Pop

4916 a8083063 Iustin Pop
    This checks the requested parameter changes against the instance and
    its nodes.
4917 a8083063 Iustin Pop

4918 a8083063 Iustin Pop
    """
4919 24991749 Iustin Pop
    force = self.force = self.op.force
4920 a8083063 Iustin Pop
4921 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
4922 31a853d2 Iustin Pop
4923 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4924 1a5c7281 Guido Trotter
    assert self.instance is not None, \
4925 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4926 74409b12 Iustin Pop
    pnode = self.instance.primary_node
4927 74409b12 Iustin Pop
    nodelist = [pnode]
4928 74409b12 Iustin Pop
    nodelist.extend(instance.secondary_nodes)
4929 74409b12 Iustin Pop
4930 338e51e8 Iustin Pop
    # hvparams processing
4931 74409b12 Iustin Pop
    if self.op.hvparams:
4932 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
4933 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
4934 74409b12 Iustin Pop
        if val is None:
4935 74409b12 Iustin Pop
          try:
4936 74409b12 Iustin Pop
            del i_hvdict[key]
4937 74409b12 Iustin Pop
          except KeyError:
4938 74409b12 Iustin Pop
            pass
4939 74409b12 Iustin Pop
        else:
4940 74409b12 Iustin Pop
          i_hvdict[key] = val
4941 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4942 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
4943 74409b12 Iustin Pop
                                i_hvdict)
4944 74409b12 Iustin Pop
      # local check
4945 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
4946 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
4947 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
4948 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
4949 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
4950 338e51e8 Iustin Pop
    else:
4951 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
4952 338e51e8 Iustin Pop
4953 338e51e8 Iustin Pop
    # beparams processing
4954 338e51e8 Iustin Pop
    if self.op.beparams:
4955 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
4956 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
4957 338e51e8 Iustin Pop
        if val is None:
4958 338e51e8 Iustin Pop
          try:
4959 338e51e8 Iustin Pop
            del i_bedict[key]
4960 338e51e8 Iustin Pop
          except KeyError:
4961 338e51e8 Iustin Pop
            pass
4962 338e51e8 Iustin Pop
        else:
4963 338e51e8 Iustin Pop
          i_bedict[key] = val
4964 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4965 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4966 338e51e8 Iustin Pop
                                i_bedict)
4967 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
4968 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
4969 338e51e8 Iustin Pop
    else:
4970 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
4971 74409b12 Iustin Pop
4972 cfefe007 Guido Trotter
    self.warn = []
4973 647a5d80 Iustin Pop
4974 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
4975 647a5d80 Iustin Pop
      mem_check_list = [pnode]
4976 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4977 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
4978 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
4979 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
4980 72737a7f Iustin Pop
                                                  instance.hypervisor)
4981 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
4982 72737a7f Iustin Pop
                                         instance.hypervisor)
4983 cfefe007 Guido Trotter
4984 cfefe007 Guido Trotter
      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
4985 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
4986 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
4987 cfefe007 Guido Trotter
      else:
4988 cfefe007 Guido Trotter
        if instance_info:
4989 cfefe007 Guido Trotter
          current_mem = instance_info['memory']
4990 cfefe007 Guido Trotter
        else:
4991 cfefe007 Guido Trotter
          # Assume instance not running
4992 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
4993 cfefe007 Guido Trotter
          # and we have no other way to check)
4994 cfefe007 Guido Trotter
          current_mem = 0
4995 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
4996 338e51e8 Iustin Pop
                    nodeinfo[pnode]['memory_free'])
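        # Worked example (made-up numbers): asking for 2048 MB of memory while
        # the instance currently uses 512 MB and the primary node reports
        # 1024 MB free gives miss_mem = 2048 - 512 - 1024 = 512, so the
        # change would be refused below.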
4997 cfefe007 Guido Trotter
        if miss_mem > 0:
4998 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
4999 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
5000 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
5001 cfefe007 Guido Trotter
5002 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
5003 647a5d80 Iustin Pop
        for node in instance.secondary_nodes:
5004 647a5d80 Iustin Pop
          if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
5005 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
5006 647a5d80 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nodeinfo[node]['memory_free']:
5007 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
5008 647a5d80 Iustin Pop
                             " secondary node %s" % node)
5009 5bc84f33 Alexander Schreiber
5010 24991749 Iustin Pop
    # NIC processing
5011 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5012 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5013 24991749 Iustin Pop
        if not instance.nics:
5014 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
5015 24991749 Iustin Pop
        continue
5016 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
5017 24991749 Iustin Pop
        # an existing nic
5018 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
5019 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
5020 24991749 Iustin Pop
                                     " are 0 to %d" %
5021 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
5022 24991749 Iustin Pop
      nic_bridge = nic_dict.get('bridge', None)
5023 24991749 Iustin Pop
      if nic_bridge is not None:
5024 24991749 Iustin Pop
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
5025 24991749 Iustin Pop
          msg = ("Bridge '%s' doesn't exist on one of"
5026 24991749 Iustin Pop
                 " the instance nodes" % nic_bridge)
5027 24991749 Iustin Pop
          if self.force:
5028 24991749 Iustin Pop
            self.warn.append(msg)
5029 24991749 Iustin Pop
          else:
5030 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
5031 24991749 Iustin Pop
5032 24991749 Iustin Pop
    # DISK processing
5033 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
5034 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
5035 24991749 Iustin Pop
                                 " diskless instances")
5036 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5037 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5038 24991749 Iustin Pop
        if len(instance.disks) == 1:
5039 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
5040 24991749 Iustin Pop
                                     " an instance")
5041 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
5042 24991749 Iustin Pop
        ins_l = ins_l[pnode]
5043 24991749 Iustin Pop
        if type(ins_l) is not list:
5044 24991749 Iustin Pop
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
5045 24991749 Iustin Pop
        if instance.name in ins_l:
5046 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
5047 24991749 Iustin Pop
                                     " disks.")
5048 24991749 Iustin Pop
5049 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
5050 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
5051 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
5052 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
5053 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
5054 24991749 Iustin Pop
        # an existing disk
5055 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
5056 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
5057 24991749 Iustin Pop
                                     " are 0 to %d" %
5058 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
5059 24991749 Iustin Pop
5060 a8083063 Iustin Pop
    return
5061 a8083063 Iustin Pop
5062 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5063 a8083063 Iustin Pop
    """Modifies an instance.
5064 a8083063 Iustin Pop

5065 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
5066 24991749 Iustin Pop

5067 a8083063 Iustin Pop
    """
5068 cfefe007 Guido Trotter
    # Process the warnings from CheckPrereq here, as we don't have a
5069 cfefe007 Guido Trotter
    # feedback_fn there.
5070 cfefe007 Guido Trotter
    for warn in self.warn:
5071 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
5072 cfefe007 Guido Trotter
5073 a8083063 Iustin Pop
    result = []
5074 a8083063 Iustin Pop
    instance = self.instance
5075 24991749 Iustin Pop
    # disk changes
5076 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5077 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5078 24991749 Iustin Pop
        # remove the last disk
5079 24991749 Iustin Pop
        device = instance.disks.pop()
5080 24991749 Iustin Pop
        device_idx = len(instance.disks)
5081 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
5082 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
5083 24991749 Iustin Pop
          if not self.rpc.call_blockdev_remove(node, disk):
5084 24991749 Iustin Pop
            self.proc.LogWarning("Could not remove disk/%d on node %s,"
5085 24991749 Iustin Pop
                                 " continuing anyway", device_idx, node)
5086 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
5087 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
5088 24991749 Iustin Pop
        # add a new disk
5089 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
5090 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
5091 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
5092 24991749 Iustin Pop
        else:
5093 24991749 Iustin Pop
          file_driver = file_path = None
5094 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
5095 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
5096 24991749 Iustin Pop
                                         instance.disk_template,
5097 24991749 Iustin Pop
                                         instance, instance.primary_node,
5098 24991749 Iustin Pop
                                         instance.secondary_nodes,
5099 24991749 Iustin Pop
                                         [disk_dict],
5100 24991749 Iustin Pop
                                         file_path,
5101 24991749 Iustin Pop
                                         file_driver,
5102 24991749 Iustin Pop
                                         disk_idx_base)[0]
5103 24991749 Iustin Pop
        new_disk.mode = disk_dict['mode']
5104 24991749 Iustin Pop
        instance.disks.append(new_disk)
5105 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
5106 24991749 Iustin Pop
5107 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
5108 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
5109 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
5110 24991749 Iustin Pop
        #HARDCODE
5111 24991749 Iustin Pop
        for secondary_node in instance.secondary_nodes:
5112 24991749 Iustin Pop
          if not _CreateBlockDevOnSecondary(self, secondary_node, instance,
5113 24991749 Iustin Pop
                                            new_disk, False, info):
5114 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
5115 24991749 Iustin Pop
                            " secondary node %s!",
5116 24991749 Iustin Pop
                            new_disk.iv_name, new_disk, secondary_node)
5117 24991749 Iustin Pop
        #HARDCODE
5118 24991749 Iustin Pop
        if not _CreateBlockDevOnPrimary(self, instance.primary_node,
5119 24991749 Iustin Pop
                                        instance, new_disk, info):
5120 24991749 Iustin Pop
          self.LogWarning("Failed to create volume %s on primary!",
5121 24991749 Iustin Pop
                          new_disk.iv_name)
5122 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
5123 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
5124 24991749 Iustin Pop
      else:
5125 24991749 Iustin Pop
        # change a given disk
5126 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
5127 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
5128 24991749 Iustin Pop
    # NIC changes
5129 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5130 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5131 24991749 Iustin Pop
        # remove the last nic
5132 24991749 Iustin Pop
        del instance.nics[-1]
5133 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
5134 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
5135 24991749 Iustin Pop
        # add a new nic
5136 24991749 Iustin Pop
        if 'mac' not in nic_dict:
5137 24991749 Iustin Pop
          mac = constants.VALUE_GENERATE
5138 24991749 Iustin Pop
        else:
5139 24991749 Iustin Pop
          mac = nic_dict['mac']
5140 24991749 Iustin Pop
        if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5141 24991749 Iustin Pop
          mac = self.cfg.GenerateMAC()
5142 24991749 Iustin Pop
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
5143 24991749 Iustin Pop
                              bridge=nic_dict.get('bridge', None))
5144 24991749 Iustin Pop
        instance.nics.append(new_nic)
5145 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
5146 24991749 Iustin Pop
                       "add:mac=%s,ip=%s,bridge=%s" %
5147 24991749 Iustin Pop
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
5148 24991749 Iustin Pop
      else:
5149 24991749 Iustin Pop
        # change a given nic
5150 24991749 Iustin Pop
        for key in 'mac', 'ip', 'bridge':
5151 24991749 Iustin Pop
          if key in nic_dict:
5152 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
5153 24991749 Iustin Pop
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
5154 24991749 Iustin Pop
5155 24991749 Iustin Pop
    # hvparams changes
5156 74409b12 Iustin Pop
    if self.op.hvparams:
5157 74409b12 Iustin Pop
      instance.hvparams = self.hv_new
5158 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
5159 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
5160 24991749 Iustin Pop
5161 24991749 Iustin Pop
    # beparams changes
5162 338e51e8 Iustin Pop
    if self.op.beparams:
5163 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
5164 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
5165 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
5166 a8083063 Iustin Pop
5167 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
5168 a8083063 Iustin Pop
5169 a8083063 Iustin Pop
    return result
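    # The list returned above pairs each changed parameter with its new
    # value, e.g. (made-up values):
    #   [("be/memory", 512), ("nic.bridge/0", "xen-br0"), ("disk/1", "remove")]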
5170 a8083063 Iustin Pop
5171 a8083063 Iustin Pop
5172 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
5173 a8083063 Iustin Pop
  """Query the exports list
5174 a8083063 Iustin Pop

5175 a8083063 Iustin Pop
  """
5176 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
5177 21a15682 Guido Trotter
  REQ_BGL = False
5178 21a15682 Guido Trotter
5179 21a15682 Guido Trotter
  def ExpandNames(self):
5180 21a15682 Guido Trotter
    self.needed_locks = {}
5181 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
5182 21a15682 Guido Trotter
    if not self.op.nodes:
5183 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5184 21a15682 Guido Trotter
    else:
5185 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
5186 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
5187 a8083063 Iustin Pop
5188 a8083063 Iustin Pop
  def CheckPrereq(self):
5189 21a15682 Guido Trotter
    """Check prerequisites.
5190 a8083063 Iustin Pop

5191 a8083063 Iustin Pop
    """
5192 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
5193 a8083063 Iustin Pop
5194 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5195 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
5196 a8083063 Iustin Pop

5197 e4376078 Iustin Pop
    @rtype: dict
5198 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
5199 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
5200 e4376078 Iustin Pop
        that node.
5201 a8083063 Iustin Pop

5202 a8083063 Iustin Pop
    """
5203 72737a7f Iustin Pop
    return self.rpc.call_export_list(self.nodes)
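    # The RPC result is the mapping described in the docstring, e.g.
    # (made-up names):
    #   {'node1.example.com': ['instance1.example.com'],
    #    'node2.example.com': []}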
5204 a8083063 Iustin Pop
5205 a8083063 Iustin Pop
5206 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
5207 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
5208 a8083063 Iustin Pop

5209 a8083063 Iustin Pop
  """
5210 a8083063 Iustin Pop
  HPATH = "instance-export"
5211 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5212 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
5213 6657590e Guido Trotter
  REQ_BGL = False
5214 6657590e Guido Trotter
5215 6657590e Guido Trotter
  def ExpandNames(self):
5216 6657590e Guido Trotter
    self._ExpandAndLockInstance()
5217 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
5218 6657590e Guido Trotter
    #
5219 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
5220 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
5221 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
5222 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
5223 6657590e Guido Trotter
    #    then one to remove, after
5224 6657590e Guido Trotter
    #  - removing the removal operation altogether
5225 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5226 6657590e Guido Trotter
5227 6657590e Guido Trotter
  def DeclareLocks(self, level):
5228 6657590e Guido Trotter
    """Last minute lock declaration."""
5229 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
5230 a8083063 Iustin Pop
5231 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5232 a8083063 Iustin Pop
    """Build hooks env.
5233 a8083063 Iustin Pop

5234 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
5235 a8083063 Iustin Pop

5236 a8083063 Iustin Pop
    """
5237 a8083063 Iustin Pop
    env = {
5238 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
5239 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
5240 a8083063 Iustin Pop
      }
5241 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5242 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
5243 a8083063 Iustin Pop
          self.op.target_node]
5244 a8083063 Iustin Pop
    return env, nl, nl
5245 a8083063 Iustin Pop
5246 a8083063 Iustin Pop
  def CheckPrereq(self):
5247 a8083063 Iustin Pop
    """Check prerequisites.
5248 a8083063 Iustin Pop

5249 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
5250 a8083063 Iustin Pop

5251 a8083063 Iustin Pop
    """
5252 6657590e Guido Trotter
    instance_name = self.op.instance_name
5253 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
5254 6657590e Guido Trotter
    assert self.instance is not None, \
5255 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
5256 a8083063 Iustin Pop
5257 6657590e Guido Trotter
    self.dst_node = self.cfg.GetNodeInfo(
5258 6657590e Guido Trotter
      self.cfg.ExpandNodeName(self.op.target_node))
5259 a8083063 Iustin Pop
5260 268b8e42 Iustin Pop
    if self.dst_node is None:
5261 268b8e42 Iustin Pop
      # This is wrong node name, not a non-locked node
5262 268b8e42 Iustin Pop
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
5263 a8083063 Iustin Pop
5264 b6023d6c Manuel Franceschini
    # instance disk type verification
5265 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
5266 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
5267 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
5268 b6023d6c Manuel Franceschini
                                   " file-based disks")
5269 b6023d6c Manuel Franceschini
5270 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5271 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
5272 a8083063 Iustin Pop

5273 a8083063 Iustin Pop
    """
5274 a8083063 Iustin Pop
    instance = self.instance
5275 a8083063 Iustin Pop
    dst_node = self.dst_node
5276 a8083063 Iustin Pop
    src_node = instance.primary_node
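    # Overview of the steps below (descriptive comment only): optionally shut
    # the instance down, snapshot every disk on the source node, restart the
    # instance if it was running, copy each snapshot to the target node and
    # delete it from the source, finalize the export on the target node, and
    # finally prune any older export of the same instance from other nodes.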
5277 a8083063 Iustin Pop
    if self.op.shutdown:
5278 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
5279 72737a7f Iustin Pop
      if not self.rpc.call_instance_shutdown(src_node, instance):
5280 38206f3c Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
5281 38206f3c Iustin Pop
                                 (instance.name, src_node))
5282 a8083063 Iustin Pop
5283 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
5284 a8083063 Iustin Pop
5285 a8083063 Iustin Pop
    snap_disks = []
5286 a8083063 Iustin Pop
5287 a8083063 Iustin Pop
    try:
5288 a8083063 Iustin Pop
      for disk in instance.disks:
5289 19d7f90a Guido Trotter
        # new_dev_name will be the name of a snapshot of an lvm leaf of the
        # disk we passed in
5290 19d7f90a Guido Trotter
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
5291 a8083063 Iustin Pop
5292 19d7f90a Guido Trotter
        if not new_dev_name:
5293 19d7f90a Guido Trotter
          self.LogWarning("Could not snapshot block device %s on node %s",
5294 9a4f63d1 Iustin Pop
                          disk.logical_id[1], src_node)
5295 19d7f90a Guido Trotter
          snap_disks.append(False)
5296 19d7f90a Guido Trotter
        else:
5297 19d7f90a Guido Trotter
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
5298 19d7f90a Guido Trotter
                                 logical_id=(vgname, new_dev_name),
5299 19d7f90a Guido Trotter
                                 physical_id=(vgname, new_dev_name),
5300 19d7f90a Guido Trotter
                                 iv_name=disk.iv_name)
5301 19d7f90a Guido Trotter
          snap_disks.append(new_dev)
5302 a8083063 Iustin Pop
5303 a8083063 Iustin Pop
    finally:
5304 fb300fb7 Guido Trotter
      if self.op.shutdown and instance.status == "up":
5305 72737a7f Iustin Pop
        if not self.rpc.call_instance_start(src_node, instance, None):
5306 b9bddb6b Iustin Pop
          _ShutdownInstanceDisks(self, instance)
5307 fb300fb7 Guido Trotter
          raise errors.OpExecError("Could not start instance")
5308 a8083063 Iustin Pop
5309 a8083063 Iustin Pop
    # TODO: check for size
5310 a8083063 Iustin Pop
5311 62c9ec92 Iustin Pop
    cluster_name = self.cfg.GetClusterName()
5312 74c47259 Iustin Pop
    for idx, dev in enumerate(snap_disks):
5313 19d7f90a Guido Trotter
      if dev:
5314 19d7f90a Guido Trotter
        if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
5315 74c47259 Iustin Pop
                                             instance, cluster_name, idx):
5316 19d7f90a Guido Trotter
          self.LogWarning("Could not export block device %s from node %s to"
5317 19d7f90a Guido Trotter
                          " node %s", dev.logical_id[1], src_node,
5318 19d7f90a Guido Trotter
                          dst_node.name)
5319 19d7f90a Guido Trotter
        if not self.rpc.call_blockdev_remove(src_node, dev):
5320 19d7f90a Guido Trotter
          self.LogWarning("Could not remove snapshot block device %s from node"
5321 19d7f90a Guido Trotter
                          " %s", dev.logical_id[1], src_node)
5322 a8083063 Iustin Pop
5323 72737a7f Iustin Pop
    if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks):
5324 19d7f90a Guido Trotter
      self.LogWarning("Could not finalize export for instance %s on node %s",
5325 19d7f90a Guido Trotter
                      instance.name, dst_node.name)
5326 a8083063 Iustin Pop
5327 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
5328 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
5329 a8083063 Iustin Pop
5330 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
5331 a8083063 Iustin Pop
    # if we proceed, the backup would be removed because OpQueryExports
5332 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
5333 a8083063 Iustin Pop
    if nodelist:
5334 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
5335 a8083063 Iustin Pop
      for node in exportlist:
5336 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
5337 72737a7f Iustin Pop
          if not self.rpc.call_export_remove(node, instance.name):
5338 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
5339 19d7f90a Guido Trotter
                            " on node %s", instance.name, node)
5340 5c947f38 Iustin Pop
5341 5c947f38 Iustin Pop
5342 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
5343 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
5344 9ac99fda Guido Trotter

5345 9ac99fda Guido Trotter
  """
5346 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
5347 3656b3af Guido Trotter
  REQ_BGL = False
5348 3656b3af Guido Trotter
5349 3656b3af Guido Trotter
  def ExpandNames(self):
5350 3656b3af Guido Trotter
    self.needed_locks = {}
5351 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
5352 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
5353 3656b3af Guido Trotter
    # we can also remove exports for an already-removed instance)
5354 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5355 9ac99fda Guido Trotter
5356 9ac99fda Guido Trotter
  def CheckPrereq(self):
5357 9ac99fda Guido Trotter
    """Check prerequisites.
5358 9ac99fda Guido Trotter
    """
5359 9ac99fda Guido Trotter
    pass
5360 9ac99fda Guido Trotter
5361 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
5362 9ac99fda Guido Trotter
    """Remove any export.
5363 9ac99fda Guido Trotter

5364 9ac99fda Guido Trotter
    """
5365 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
5366 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
5367 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
5368 9ac99fda Guido Trotter
    fqdn_warn = False
5369 9ac99fda Guido Trotter
    if not instance_name:
5370 9ac99fda Guido Trotter
      fqdn_warn = True
5371 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
5372 9ac99fda Guido Trotter
5373 72737a7f Iustin Pop
    exportlist = self.rpc.call_export_list(self.acquired_locks[
5374 72737a7f Iustin Pop
      locking.LEVEL_NODE])
5375 9ac99fda Guido Trotter
    found = False
5376 9ac99fda Guido Trotter
    for node in exportlist:
5377 9ac99fda Guido Trotter
      if instance_name in exportlist[node]:
5378 9ac99fda Guido Trotter
        found = True
5379 72737a7f Iustin Pop
        if not self.rpc.call_export_remove(node, instance_name):
5380 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
5381 9a4f63d1 Iustin Pop
                        " on node %s", instance_name, node)
5382 9ac99fda Guido Trotter
5383 9ac99fda Guido Trotter
    if fqdn_warn and not found:
5384 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
5385 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
5386 9ac99fda Guido Trotter
                  " Domain Name.")
5387 9ac99fda Guido Trotter
5388 9ac99fda Guido Trotter
5389 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
5390 5c947f38 Iustin Pop
  """Generic tags LU.
5391 5c947f38 Iustin Pop

5392 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
5393 5c947f38 Iustin Pop

5394 5c947f38 Iustin Pop
  """
5395 5c947f38 Iustin Pop
5396 8646adce Guido Trotter
  def ExpandNames(self):
5397 8646adce Guido Trotter
    self.needed_locks = {}
5398 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
5399 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
5400 5c947f38 Iustin Pop
      if name is None:
5401 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
5402 3ecf6786 Iustin Pop
                                   (self.op.name,))
5403 5c947f38 Iustin Pop
      self.op.name = name
5404 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = name
5405 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
5406 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
5407 5c947f38 Iustin Pop
      if name is None:
5408 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
5409 3ecf6786 Iustin Pop
                                   (self.op.name,))
5410 5c947f38 Iustin Pop
      self.op.name = name
5411 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = name
5412 8646adce Guido Trotter
5413 8646adce Guido Trotter
  def CheckPrereq(self):
5414 8646adce Guido Trotter
    """Check prerequisites.
5415 8646adce Guido Trotter

5416 8646adce Guido Trotter
    """
5417 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
5418 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
5419 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
5420 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
5421 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
5422 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
5423 5c947f38 Iustin Pop
    else:
5424 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
5425 3ecf6786 Iustin Pop
                                 str(self.op.kind))
5426 5c947f38 Iustin Pop
5427 5c947f38 Iustin Pop
5428 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
5429 5c947f38 Iustin Pop
  """Returns the tags of a given object.
5430 5c947f38 Iustin Pop

5431 5c947f38 Iustin Pop
  """
5432 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
5433 8646adce Guido Trotter
  REQ_BGL = False
5434 5c947f38 Iustin Pop
5435 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5436 5c947f38 Iustin Pop
    """Returns the tag list.
5437 5c947f38 Iustin Pop

5438 5c947f38 Iustin Pop
    """
5439 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
5440 5c947f38 Iustin Pop
5441 5c947f38 Iustin Pop
5442 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
5443 73415719 Iustin Pop
  """Searches the tags for a given pattern.
5444 73415719 Iustin Pop

5445 73415719 Iustin Pop
  """
5446 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
5447 8646adce Guido Trotter
  REQ_BGL = False
5448 8646adce Guido Trotter
5449 8646adce Guido Trotter
  def ExpandNames(self):
5450 8646adce Guido Trotter
    self.needed_locks = {}
5451 73415719 Iustin Pop
5452 73415719 Iustin Pop
  def CheckPrereq(self):
5453 73415719 Iustin Pop
    """Check prerequisites.
5454 73415719 Iustin Pop

5455 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
5456 73415719 Iustin Pop

5457 73415719 Iustin Pop
    """
5458 73415719 Iustin Pop
    try:
5459 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
5460 73415719 Iustin Pop
    except re.error, err:
5461 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
5462 73415719 Iustin Pop
                                 (self.op.pattern, err))
5463 73415719 Iustin Pop
5464 73415719 Iustin Pop
  def Exec(self, feedback_fn):
5465 73415719 Iustin Pop
    """Returns the tag list.
5466 73415719 Iustin Pop

5467 73415719 Iustin Pop
    """
5468 73415719 Iustin Pop
    cfg = self.cfg
5469 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
5470 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
5471 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
5472 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
5473 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
5474 73415719 Iustin Pop
    results = []
5475 73415719 Iustin Pop
    for path, target in tgts:
5476 73415719 Iustin Pop
      for tag in target.GetTags():
5477 73415719 Iustin Pop
        if self.re.search(tag):
5478 73415719 Iustin Pop
          results.append((path, tag))
5479 73415719 Iustin Pop
    return results
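    # The value returned above is a list of (path, tag) pairs, e.g.
    # (made-up values):
    #   [("/cluster", "prod"), ("/instances/web1.example.com", "prod")]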
5480 73415719 Iustin Pop
5481 73415719 Iustin Pop
5482 f27302fa Iustin Pop
class LUAddTags(TagsLU):
5483 5c947f38 Iustin Pop
  """Sets a tag on a given object.
5484 5c947f38 Iustin Pop

5485 5c947f38 Iustin Pop
  """
5486 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5487 8646adce Guido Trotter
  REQ_BGL = False
5488 5c947f38 Iustin Pop
5489 5c947f38 Iustin Pop
  def CheckPrereq(self):
5490 5c947f38 Iustin Pop
    """Check prerequisites.
5491 5c947f38 Iustin Pop

5492 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
5493 5c947f38 Iustin Pop

5494 5c947f38 Iustin Pop
    """
5495 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5496 f27302fa Iustin Pop
    for tag in self.op.tags:
5497 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5498 5c947f38 Iustin Pop
5499 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5500 5c947f38 Iustin Pop
    """Sets the tag.
5501 5c947f38 Iustin Pop

5502 5c947f38 Iustin Pop
    """
5503 5c947f38 Iustin Pop
    try:
5504 f27302fa Iustin Pop
      for tag in self.op.tags:
5505 f27302fa Iustin Pop
        self.target.AddTag(tag)
5506 5c947f38 Iustin Pop
    except errors.TagError, err:
5507 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
5508 5c947f38 Iustin Pop
    try:
5509 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5510 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5511 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5512 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5513 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5514 5c947f38 Iustin Pop
5515 5c947f38 Iustin Pop
5516 f27302fa Iustin Pop
class LUDelTags(TagsLU):
5517 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
5518 5c947f38 Iustin Pop

5519 5c947f38 Iustin Pop
  """
5520 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5521 8646adce Guido Trotter
  REQ_BGL = False
5522 5c947f38 Iustin Pop
5523 5c947f38 Iustin Pop
  def CheckPrereq(self):
5524 5c947f38 Iustin Pop
    """Check prerequisites.
5525 5c947f38 Iustin Pop

5526 5c947f38 Iustin Pop
    This checks that we have the given tag.
5527 5c947f38 Iustin Pop

5528 5c947f38 Iustin Pop
    """
5529 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5530 f27302fa Iustin Pop
    for tag in self.op.tags:
5531 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5532 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
5533 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
5534 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
5535 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
5536 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
5537 f27302fa Iustin Pop
      diff_names.sort()
5538 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
5539 f27302fa Iustin Pop
                                 (",".join(diff_names)))
5540 5c947f38 Iustin Pop
5541 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5542 5c947f38 Iustin Pop
    """Remove the tag from the object.
5543 5c947f38 Iustin Pop

5544 5c947f38 Iustin Pop
    """
5545 f27302fa Iustin Pop
    for tag in self.op.tags:
5546 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
5547 5c947f38 Iustin Pop
    try:
5548 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5549 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5550 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5551 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5552 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5553 06009e27 Iustin Pop
5554 0eed6e61 Guido Trotter
5555 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
5556 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
5557 06009e27 Iustin Pop

5558 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
5559 06009e27 Iustin Pop
  time.
5560 06009e27 Iustin Pop

5561 06009e27 Iustin Pop
  """
5562 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
5563 fbe9022f Guido Trotter
  REQ_BGL = False
5564 06009e27 Iustin Pop
5565 fbe9022f Guido Trotter
  def ExpandNames(self):
5566 fbe9022f Guido Trotter
    """Expand names and set required locks.
5567 06009e27 Iustin Pop

5568 fbe9022f Guido Trotter
    This expands the node list, if any.
5569 06009e27 Iustin Pop

5570 06009e27 Iustin Pop
    """
5571 fbe9022f Guido Trotter
    self.needed_locks = {}
5572 06009e27 Iustin Pop
    if self.op.on_nodes:
5573 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but it is not always appropriate
5574 fbe9022f Guido Trotter
      # to use it this way in ExpandNames. Check the LogicalUnit.ExpandNames
5575 fbe9022f Guido Trotter
      # docstring for more information.
5576 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
5577 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
5578 fbe9022f Guido Trotter
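  # Illustration only: after ExpandNames, self.needed_locks might look like
  #   {locking.LEVEL_NODE: ["node1.example.com", "node2.example.com"]}
  # i.e. only the nodes named in the opcode are locked; if on_nodes is empty
  # no node locks are acquired at all.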
5579 fbe9022f Guido Trotter
  def CheckPrereq(self):
5580 fbe9022f Guido Trotter
    """Check prerequisites.
5581 fbe9022f Guido Trotter

5582 fbe9022f Guido Trotter
    """
5583 06009e27 Iustin Pop
5584 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
5585 06009e27 Iustin Pop
    """Do the actual sleep.
5586 06009e27 Iustin Pop

5587 06009e27 Iustin Pop
    """
5588 06009e27 Iustin Pop
    if self.op.on_master:
5589 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
5590 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
5591 06009e27 Iustin Pop
    if self.op.on_nodes:
5592 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
5593 06009e27 Iustin Pop
      if not result:
5594 06009e27 Iustin Pop
        raise errors.OpExecError("Complete failure from rpc call")
5595 06009e27 Iustin Pop
      for node, node_result in result.items():
5596 06009e27 Iustin Pop
        if not node_result:
5597 06009e27 Iustin Pop
          raise errors.OpExecError("Failure during rpc call to node %s,"
5598 06009e27 Iustin Pop
                                   " result: %s" % (node, node_result))
5599 d61df03e Iustin Pop
5600 d61df03e Iustin Pop
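# Illustration only: call_test_delay returns a dict keyed by node name with a
# truthy/falsy per-node status, e.g.
#   {"node1.example.com": True, "node2.example.com": False}
# which is why Exec above checks both the dict itself and every single value.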
5601 d1c2dd75 Iustin Pop
class IAllocator(object):
5602 d1c2dd75 Iustin Pop
  """IAllocator framework.
5603 d61df03e Iustin Pop

5604 d1c2dd75 Iustin Pop
  An IAllocator instance has four sets of attributes:
5605 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
5606 d1c2dd75 Iustin Pop
    - input data (all members of the _ALLO_KEYS or _RELO_KEYS class
      attribute, depending on the mode, are required)
5607 d1c2dd75 Iustin Pop
    - four buffer attributes (in_text, in_data, out_text, out_data), which
5608 d1c2dd75 Iustin Pop
      represent the input (to the external script) in text and data
5609 d1c2dd75 Iustin Pop
      structure format, and the output from it, again in two formats
5610 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
5611 d1c2dd75 Iustin Pop
      easy usage
5612 d61df03e Iustin Pop

5613 d61df03e Iustin Pop
  """
5614 29859cb7 Iustin Pop
  _ALLO_KEYS = [
5615 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
5616 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
5617 d1c2dd75 Iustin Pop
    ]
5618 29859cb7 Iustin Pop
  _RELO_KEYS = [
5619 29859cb7 Iustin Pop
    "relocate_from",
5620 29859cb7 Iustin Pop
    ]
5621 d1c2dd75 Iustin Pop
5622 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
5623 72737a7f Iustin Pop
    self.lu = lu
5624 d1c2dd75 Iustin Pop
    # init buffer variables
5625 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
5626 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
5627 29859cb7 Iustin Pop
    self.mode = mode
5628 29859cb7 Iustin Pop
    self.name = name
5629 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
5630 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
5631 29859cb7 Iustin Pop
    self.relocate_from = None
5632 27579978 Iustin Pop
    # computed fields
5633 27579978 Iustin Pop
    self.required_nodes = None
5634 d1c2dd75 Iustin Pop
    # init result fields
5635 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
5636 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5637 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
5638 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
5639 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
5640 29859cb7 Iustin Pop
    else:
5641 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
5642 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
5643 d1c2dd75 Iustin Pop
    for key in kwargs:
5644 29859cb7 Iustin Pop
      if key not in keyset:
5645 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
5646 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5647 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
5648 29859cb7 Iustin Pop
    for key in keyset:
5649 d1c2dd75 Iustin Pop
      if key not in kwargs:
5650 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
5651 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5652 d1c2dd75 Iustin Pop
    self._BuildInputData()
5653 d1c2dd75 Iustin Pop
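  # Illustration only: a typical allocation-mode construction, mirroring what
  # LUTestAllocator.Exec does further down (all values are made up):
  #   ial = IAllocator(lu, mode=constants.IALLOCATOR_MODE_ALLOC,
  #                    name="instance1.example.com", mem_size=512,
  #                    disks=[{"size": 1024, "mode": "w"}] * 2,
  #                    disk_template=constants.DT_DRBD8, os="debian-etch",
  #                    tags=[], nics=[], vcpus=1, hypervisor=None)
  # Missing or unexpected keyword arguments raise ProgrammerError in __init__.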
5654 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
5655 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
5656 d1c2dd75 Iustin Pop

5657 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
5658 d1c2dd75 Iustin Pop

5659 d1c2dd75 Iustin Pop
    """
5660 72737a7f Iustin Pop
    cfg = self.lu.cfg
5661 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
5662 d1c2dd75 Iustin Pop
    # cluster data
5663 d1c2dd75 Iustin Pop
    data = {
5664 d1c2dd75 Iustin Pop
      "version": 1,
5665 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
5666 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
5667 e69d05fd Iustin Pop
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
5668 d1c2dd75 Iustin Pop
      # we don't have job IDs
5669 d61df03e Iustin Pop
      }
5670 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
5671 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
5672 6286519f Iustin Pop
5673 d1c2dd75 Iustin Pop
    # node data
5674 d1c2dd75 Iustin Pop
    node_results = {}
5675 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
5676 8cc7e742 Guido Trotter
5677 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5678 8cc7e742 Guido Trotter
      hypervisor = self.hypervisor
5679 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
5680 8cc7e742 Guido Trotter
      hypervisor = cfg.GetInstanceInfo(self.name).hypervisor
5681 8cc7e742 Guido Trotter
5682 72737a7f Iustin Pop
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
5683 8cc7e742 Guido Trotter
                                           hypervisor)
5684 18640d69 Guido Trotter
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
5685 18640d69 Guido Trotter
                       cluster_info.enabled_hypervisors)
5686 d1c2dd75 Iustin Pop
    for nname in node_list:
5687 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
5688 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
5689 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
5690 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
5691 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
5692 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
5693 d1c2dd75 Iustin Pop
        if attr not in remote_info:
5694 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
5695 d1c2dd75 Iustin Pop
                                   (nname, attr))
5696 d1c2dd75 Iustin Pop
        try:
5697 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
5698 d1c2dd75 Iustin Pop
        except ValueError, err:
5699 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
5700 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
5701 6286519f Iustin Pop
      # compute memory used by primary instances
5702 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
5703 338e51e8 Iustin Pop
      for iinfo, beinfo in i_list:
5704 6286519f Iustin Pop
        if iinfo.primary_node == nname:
5705 338e51e8 Iustin Pop
          i_p_mem += beinfo[constants.BE_MEMORY]
5706 18640d69 Guido Trotter
          if iinfo.name not in node_iinfo[nname]:
5707 18640d69 Guido Trotter
            i_used_mem = 0
5708 18640d69 Guido Trotter
          else:
5709 18640d69 Guido Trotter
            i_used_mem = int(node_iinfo[nname][iinfo.name]['memory'])
5710 18640d69 Guido Trotter
          i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
5711 18640d69 Guido Trotter
          remote_info['memory_free'] -= max(0, i_mem_diff)
5712 18640d69 Guido Trotter
5713 6286519f Iustin Pop
          if iinfo.status == "up":
5714 338e51e8 Iustin Pop
            i_p_up_mem += beinfo[constants.BE_MEMORY]
5715 6286519f Iustin Pop
5716 b2662e7f Iustin Pop
      # build the per-node result for the allocator input
5717 d1c2dd75 Iustin Pop
      pnr = {
5718 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
5719 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
5720 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
5721 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
5722 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
5723 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
5724 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
5725 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
5726 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
5727 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
5728 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
5729 d1c2dd75 Iustin Pop
        }
5730 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
5731 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
5732 d1c2dd75 Iustin Pop
5733 d1c2dd75 Iustin Pop
    # instance data
5734 d1c2dd75 Iustin Pop
    instance_data = {}
5735 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
5736 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
5737 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
5738 d1c2dd75 Iustin Pop
      pir = {
5739 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
5740 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
5741 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
5742 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
5743 d1c2dd75 Iustin Pop
        "os": iinfo.os,
5744 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
5745 d1c2dd75 Iustin Pop
        "nics": nic_data,
5746 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
5747 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
5748 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
5749 d1c2dd75 Iustin Pop
        }
5750 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
5751 d61df03e Iustin Pop
5752 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
5753 d61df03e Iustin Pop
5754 d1c2dd75 Iustin Pop
    self.in_data = data
5755 d61df03e Iustin Pop
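  # Illustration only: a heavily trimmed sketch of the structure left in
  # self.in_data by the method above (all values are made up):
  #   {"version": 1,
  #    "cluster_name": "cluster.example.com",
  #    "cluster_tags": [],
  #    "enable_hypervisors": ["xen-pvm"],
  #    "nodes": {"node1.example.com": {"total_memory": 4096,
  #                                    "free_memory": 2048,
  #                                    "total_disk": 102400,
  #                                    "free_disk": 51200,
  #                                    "total_cpus": 4, ...}},
  #    "instances": {"instance1.example.com": {"memory": 512, "vcpus": 1,
  #                                            "disk_template": "drbd", ...}}}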
5756 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
5757 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
5758 d61df03e Iustin Pop

5759 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
5760 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5761 d61df03e Iustin Pop

5762 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5763 d1c2dd75 Iustin Pop
    done.
5764 d61df03e Iustin Pop

5765 d1c2dd75 Iustin Pop
    """
5766 d1c2dd75 Iustin Pop
    data = self.in_data
5767 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
5768 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
5769 d1c2dd75 Iustin Pop
5770 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
5771 d1c2dd75 Iustin Pop
5772 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
5773 27579978 Iustin Pop
      self.required_nodes = 2
5774 27579978 Iustin Pop
    else:
5775 27579978 Iustin Pop
      self.required_nodes = 1
5776 d1c2dd75 Iustin Pop
    request = {
5777 d1c2dd75 Iustin Pop
      "type": "allocate",
5778 d1c2dd75 Iustin Pop
      "name": self.name,
5779 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
5780 d1c2dd75 Iustin Pop
      "tags": self.tags,
5781 d1c2dd75 Iustin Pop
      "os": self.os,
5782 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
5783 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
5784 d1c2dd75 Iustin Pop
      "disks": self.disks,
5785 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
5786 d1c2dd75 Iustin Pop
      "nics": self.nics,
5787 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5788 d1c2dd75 Iustin Pop
      }
5789 d1c2dd75 Iustin Pop
    data["request"] = request
5790 298fe380 Iustin Pop
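  # Illustration only: the "request" entry added above for a two-disk DRBD
  # allocation could look like the following (sample values; disk_space_total
  # is whatever _ComputeDiskSize returned):
  #   {"type": "allocate", "name": "instance1.example.com",
  #    "disk_template": "drbd", "tags": [], "os": "debian-etch",
  #    "vcpus": 1, "memory": 512,
  #    "disks": [{"size": 1024, "mode": "w"}, {"size": 512, "mode": "w"}],
  #    "disk_space_total": 1792, "nics": [], "required_nodes": 2}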
5791 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
5792 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
5793 298fe380 Iustin Pop

5794 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
5795 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5796 d61df03e Iustin Pop

5797 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5798 d1c2dd75 Iustin Pop
    done.
5799 d61df03e Iustin Pop

5800 d1c2dd75 Iustin Pop
    """
5801 72737a7f Iustin Pop
    instance = self.lu.cfg.GetInstanceInfo(self.name)
5802 27579978 Iustin Pop
    if instance is None:
5803 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
5804 27579978 Iustin Pop
                                   " IAllocator" % self.name)
5805 27579978 Iustin Pop
5806 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5807 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
5808 27579978 Iustin Pop
5809 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
5810 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
5811 2a139bb0 Iustin Pop
5812 27579978 Iustin Pop
    self.required_nodes = 1
5813 dafc7302 Guido Trotter
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
5814 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
5815 27579978 Iustin Pop
5816 d1c2dd75 Iustin Pop
    request = {
5817 2a139bb0 Iustin Pop
      "type": "relocate",
5818 d1c2dd75 Iustin Pop
      "name": self.name,
5819 27579978 Iustin Pop
      "disk_space_total": disk_space,
5820 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5821 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
5822 d1c2dd75 Iustin Pop
      }
5823 27579978 Iustin Pop
    self.in_data["request"] = request
5824 d61df03e Iustin Pop
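  # Illustration only: the corresponding relocation request is much smaller,
  # e.g. {"type": "relocate", "name": "instance1.example.com",
  #       "disk_space_total": 1152, "required_nodes": 1,
  #       "relocate_from": ["node2.example.com"]} (sample values only).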
5825 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
5826 d1c2dd75 Iustin Pop
    """Build input data structures.
5827 d61df03e Iustin Pop

5828 d1c2dd75 Iustin Pop
    """
5829 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
5830 d61df03e Iustin Pop
5831 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5832 d1c2dd75 Iustin Pop
      self._AddNewInstance()
5833 d1c2dd75 Iustin Pop
    else:
5834 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
5835 d61df03e Iustin Pop
5836 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
5837 d61df03e Iustin Pop
5838 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
5839 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
5840 298fe380 Iustin Pop

5841 d1c2dd75 Iustin Pop
    """
5842 72737a7f Iustin Pop
    if call_fn is None:
5843 72737a7f Iustin Pop
      call_fn = self.lu.rpc.call_iallocator_runner
5844 d1c2dd75 Iustin Pop
    data = self.in_text
5845 298fe380 Iustin Pop
5846 72737a7f Iustin Pop
    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
5847 298fe380 Iustin Pop
5848 43f5ea7a Guido Trotter
    if not isinstance(result, (list, tuple)) or len(result) != 4:
5849 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
5850 8d528b7c Iustin Pop
5851 8d528b7c Iustin Pop
    rcode, stdout, stderr, fail = result
5852 8d528b7c Iustin Pop
5853 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
5854 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
5855 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
5856 38206f3c Iustin Pop
      raise errors.OpExecError("Instance allocator call failed: %s,"
5857 38206f3c Iustin Pop
                               " output: %s" % (fail, stdout+stderr))
5858 8d528b7c Iustin Pop
    self.out_text = stdout
5859 d1c2dd75 Iustin Pop
    if validate:
5860 d1c2dd75 Iustin Pop
      self._ValidateResult()
5861 298fe380 Iustin Pop
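  # Illustration only: call_iallocator_runner returns a 4-tuple of
  # (rcode, stdout, stderr, fail); a successful run would look roughly like
  # (constants.IARUN_SUCCESS, '{"success": true, ...}', "", None) - assuming
  # the usual IARUN_SUCCESS constant - and only stdout is kept as
  # self.out_text for validation below.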
5862 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5863 d1c2dd75 Iustin Pop
    """Process the allocator results.
5864 538475ca Iustin Pop

5865 d1c2dd75 Iustin Pop
    This will process and, if successful, save the result in
5866 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5867 538475ca Iustin Pop

5868 d1c2dd75 Iustin Pop
    """
5869 d1c2dd75 Iustin Pop
    try:
5870 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5871 d1c2dd75 Iustin Pop
    except Exception, err:
5872 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5873 d1c2dd75 Iustin Pop
5874 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5875 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5876 538475ca Iustin Pop
5877 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5878 d1c2dd75 Iustin Pop
      if key not in rdict:
5879 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5880 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5881 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5882 538475ca Iustin Pop
5883 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5884 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5885 d1c2dd75 Iustin Pop
                               " is not a list")
5886 d1c2dd75 Iustin Pop
    self.out_data = rdict
5887 538475ca Iustin Pop
5888 538475ca Iustin Pop
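# Illustration only, not referenced anywhere in this module: a minimal reply
# that would satisfy _ValidateResult above. The node names are made up; the
# serializer call mirrors what a real allocator script would print on its
# standard output.
_EXAMPLE_IALLOCATOR_REPLY = serializer.Dump({
  "success": True,
  "info": "allocation successful",
  "nodes": ["node1.example.com", "node2.example.com"],
  })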
5889 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
5890 d61df03e Iustin Pop
  """Run allocator tests.
5891 d61df03e Iustin Pop

5892 d61df03e Iustin Pop
  This LU runs the allocator tests.
5893 d61df03e Iustin Pop

5894 d61df03e Iustin Pop
  """
5895 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
5896 d61df03e Iustin Pop
5897 d61df03e Iustin Pop
  def CheckPrereq(self):
5898 d61df03e Iustin Pop
    """Check prerequisites.
5899 d61df03e Iustin Pop

5900 d61df03e Iustin Pop
    This checks the opcode parameters depending on the direction and mode
    of the test.
5901 d61df03e Iustin Pop

5902 d61df03e Iustin Pop
    """
5903 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5904 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
5905 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
5906 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
5907 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
5908 d61df03e Iustin Pop
                                     attr)
5909 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
5910 d61df03e Iustin Pop
      if iname is not None:
5911 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
5912 d61df03e Iustin Pop
                                   iname)
5913 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
5914 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'")
5915 d61df03e Iustin Pop
      for row in self.op.nics:
5916 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
5917 d61df03e Iustin Pop
            "mac" not in row or
5918 d61df03e Iustin Pop
            "ip" not in row or
5919 d61df03e Iustin Pop
            "bridge" not in row):
5920 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
5921 d61df03e Iustin Pop
                                     " 'nics' parameter")
5922 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
5923 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'")
5924 298fe380 Iustin Pop
      if len(self.op.disks) != 2:
5925 298fe380 Iustin Pop
        raise errors.OpPrereqError("Only two-disk configurations supported")
5926 d61df03e Iustin Pop
      for row in self.op.disks:
5927 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
5928 d61df03e Iustin Pop
            "size" not in row or
5929 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
5930 d61df03e Iustin Pop
            "mode" not in row or
5931 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
5932 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
5933 d61df03e Iustin Pop
                                     " 'disks' parameter")
5934 8cc7e742 Guido Trotter
      if self.op.hypervisor is None:
5935 8cc7e742 Guido Trotter
        self.op.hypervisor = self.cfg.GetHypervisorType()
5936 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
5937 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
5938 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
5939 d61df03e Iustin Pop
      fname = self.cfg.ExpandInstanceName(self.op.name)
5940 d61df03e Iustin Pop
      if fname is None:
5941 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
5942 d61df03e Iustin Pop
                                   self.op.name)
5943 d61df03e Iustin Pop
      self.op.name = fname
5944 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
5945 d61df03e Iustin Pop
    else:
5946 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
5947 d61df03e Iustin Pop
                                 self.op.mode)
5948 d61df03e Iustin Pop
5949 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
5950 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
5951 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing allocator name")
5952 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
5953 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
5954 d61df03e Iustin Pop
                                 self.op.direction)
5955 d61df03e Iustin Pop
5956 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
5957 d61df03e Iustin Pop
    """Run the allocator test.
5958 d61df03e Iustin Pop

5959 d61df03e Iustin Pop
    """
5960 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5961 72737a7f Iustin Pop
      ial = IAllocator(self,
5962 29859cb7 Iustin Pop
                       mode=self.op.mode,
5963 29859cb7 Iustin Pop
                       name=self.op.name,
5964 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
5965 29859cb7 Iustin Pop
                       disks=self.op.disks,
5966 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
5967 29859cb7 Iustin Pop
                       os=self.op.os,
5968 29859cb7 Iustin Pop
                       tags=self.op.tags,
5969 29859cb7 Iustin Pop
                       nics=self.op.nics,
5970 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
5971 8cc7e742 Guido Trotter
                       hypervisor=self.op.hypervisor,
5972 29859cb7 Iustin Pop
                       )
5973 29859cb7 Iustin Pop
    else:
5974 72737a7f Iustin Pop
      ial = IAllocator(self,
5975 29859cb7 Iustin Pop
                       mode=self.op.mode,
5976 29859cb7 Iustin Pop
                       name=self.op.name,
5977 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
5978 29859cb7 Iustin Pop
                       )
5979 d61df03e Iustin Pop
5980 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
5981 d1c2dd75 Iustin Pop
      result = ial.in_text
5982 298fe380 Iustin Pop
    else:
5983 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
5984 d1c2dd75 Iustin Pop
      result = ial.out_text
5985 298fe380 Iustin Pop
    return result
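# Illustration only: with direction constants.IALLOCATOR_DIR_IN the LU above
# merely returns the JSON text that would be fed to the allocator, which makes
# it a convenient way to debug new allocator scripts without running them;
# with IALLOCATOR_DIR_OUT it runs the named allocator (validate=False, so even
# malformed replies are returned verbatim for inspection).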