#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform
import logging
import copy

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True
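
  # Example (illustrative sketch): a minimal concrete LU built on this base
  # class; the class name and the feedback message are hypothetical.
  #
  #   class LUExampleNoop(NoHooksLU):
  #     _OP_REQP = []
  #
  #     def ExpandNames(self):
  #       self.needed_locks = {}
  #
  #     def CheckPrereq(self):
  #       pass
  #
  #     def Exec(self, feedback_fn):
  #       feedback_fn("nothing to do")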

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass
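
  # Example (illustrative): an LU could override this method to normalize or
  # default optional opcode parameters; the "force" field below is purely
  # hypothetical.
  #
  #   def CheckArguments(self):
  #     if not hasattr(self.op, "force"):
  #       self.op.force = False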

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have the 'GANETI_' prefix, as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If a hook should run on no nodes, an empty list (and not None) should be
    returned for that node list.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
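
  # Example (illustrative): a concrete LU typically returns the environment
  # dict plus pre- and post-execution node lists, often just the master node.
  #
  #   def BuildHooksEnv(self):
  #     env = {"OP_TARGET": self.cfg.GetClusterName()}
  #     mn = self.cfg.GetMasterNode()
  #     return env, [mn], [mn]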

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged, but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name
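
  # Example (illustrative): an instance-level LU would normally call the
  # helper above from its ExpandNames implementation.
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()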

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instances' nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]
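
  # Example (illustrative): together with ExpandNames, a concurrent LU
  # typically uses this helper like so; constants.LOCKS_REPLACE asks for the
  # node level to be recomputed from the locked instances.
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()
  #     self.needed_locks[locking.LEVEL_NODE] = []
  #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
  #
  #   def DeclareLocks(self, level):
  #     if level == locking.LEVEL_NODE:
  #       self._LockInstancesNodes()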


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, bridge, mac) representing
      the NICs the instance has
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env
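
# Example (illustrative): for a hypothetical single-NIC instance,
#
#   _BuildInstanceHookEnv("inst1.example.com", "node1.example.com",
#                         ["node2.example.com"], "debian-etch", "up",
#                         128, 1,
#                         [("198.51.100.10", "xen-br0", "aa:00:00:00:00:01")])
#
# would return roughly:
#
#   {"OP_TARGET": "inst1.example.com",
#    "INSTANCE_NAME": "inst1.example.com",
#    "INSTANCE_PRIMARY": "node1.example.com",
#    "INSTANCE_SECONDARIES": "node2.example.com",
#    "INSTANCE_OS_TYPE": "debian-etch",
#    "INSTANCE_STATUS": "up",
#    "INSTANCE_MEMORY": 128,
#    "INSTANCE_VCPUS": 1,
#    "INSTANCE_NIC0_IP": "198.51.100.10",
#    "INSTANCE_NIC0_BRIDGE": "xen-br0",
#    "INSTANCE_NIC0_HWADDR": "aa:00:00:00:00:01",
#    "INSTANCE_NIC_COUNT": 1}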


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not lu.rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list::

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type node: string
    @param node: the name of the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @type vglist: dict
    @param vglist: dictionary of volume group names and their size
    @param node_result: the results from the node
    @param remote_version: the RPC version from the remote node
    @param feedback_fn: function used to accumulate results

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    if not node_result:
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.status != 'down':
      if (node_current not in node_instance or
          instance not in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to, should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
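
  # Worked example (illustrative): if node A reports mfree=1024 MiB and is
  # secondary for two auto-balanced instances whose primary is node B,
  # needing 512 and 768 MiB respectively, then needed_mem for the (A, B)
  # pair is 1280 MiB and the check above flags node A, because a failover
  # of B's instances onto A could not be accommodated.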

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run only in the post phase; their failure causes
    their output to be logged in the verify output and makes the verification
    fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = []
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors)
    all_vglist = self.rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': hypervisors,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    all_rversion = self.rpc.call_version(nodelist)
    all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
                                        self.cfg.GetHypervisorType())

    cluster = self.cfg.GetClusterInfo()
    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  This is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    return not bad
958 a8083063 Iustin Pop
959 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
960 e4376078 Iustin Pop
    """Analize the post-hooks' result
961 e4376078 Iustin Pop

962 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
963 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
964 d8fff41c Guido Trotter

965 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
966 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
967 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
968 e4376078 Iustin Pop
    @param feedback_fn: function used to send feedback back to the caller
969 e4376078 Iustin Pop
    @param lu_result: previous Exec result
970 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
971 e4376078 Iustin Pop
        and hook results
972 d8fff41c Guido Trotter

973 d8fff41c Guido Trotter
    """
974 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
975 38206f3c Iustin Pop
    # their results
976 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
977 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
978 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
979 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
980 d8fff41c Guido Trotter
      if not hooks_results:
981 d8fff41c Guido Trotter
        feedback_fn("  - ERROR: general communication failure")
982 d8fff41c Guido Trotter
        lu_result = 1
983 d8fff41c Guido Trotter
      else:
984 d8fff41c Guido Trotter
        for node_name in hooks_results:
985 d8fff41c Guido Trotter
          show_node_header = True
986 d8fff41c Guido Trotter
          res = hooks_results[node_name]
987 d8fff41c Guido Trotter
          if res is False or not isinstance(res, list):
988 d8fff41c Guido Trotter
            feedback_fn("    Communication failure")
989 d8fff41c Guido Trotter
            lu_result = 1
990 d8fff41c Guido Trotter
            continue
991 d8fff41c Guido Trotter
          for script, hkr, output in res:
992 d8fff41c Guido Trotter
            if hkr == constants.HKR_FAIL:
993 d8fff41c Guido Trotter
              # The node header is only shown once, if there are
994 d8fff41c Guido Trotter
              # failing hooks on that node
995 d8fff41c Guido Trotter
              if show_node_header:
996 d8fff41c Guido Trotter
                feedback_fn("  Node %s:" % node_name)
997 d8fff41c Guido Trotter
                show_node_header = False
998 d8fff41c Guido Trotter
              feedback_fn("    ERROR: Script %s failed, output:" % script)
999 d8fff41c Guido Trotter
              output = indent_re.sub('      ', output)
1000 d8fff41c Guido Trotter
              feedback_fn("%s" % output)
1001 d8fff41c Guido Trotter
              lu_result = 1
1002 d8fff41c Guido Trotter
1003 d8fff41c Guido Trotter
      return lu_result
1004 d8fff41c Guido Trotter
1005 a8083063 Iustin Pop
1006 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1007 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1008 2c95a8d4 Iustin Pop

1009 2c95a8d4 Iustin Pop
  """
1010 2c95a8d4 Iustin Pop
  _OP_REQP = []
1011 d4b9d97f Guido Trotter
  REQ_BGL = False
1012 d4b9d97f Guido Trotter
1013 d4b9d97f Guido Trotter
  def ExpandNames(self):
1014 d4b9d97f Guido Trotter
    self.needed_locks = {
1015 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1016 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1017 d4b9d97f Guido Trotter
    }
1018 d4b9d97f Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1019 2c95a8d4 Iustin Pop
1020 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1021 2c95a8d4 Iustin Pop
    """Check prerequisites.
1022 2c95a8d4 Iustin Pop

1023 2c95a8d4 Iustin Pop
    This has no prerequisites.
1024 2c95a8d4 Iustin Pop

1025 2c95a8d4 Iustin Pop
    """
1026 2c95a8d4 Iustin Pop
    pass
1027 2c95a8d4 Iustin Pop
1028 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1029 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1030 2c95a8d4 Iustin Pop

1031 2c95a8d4 Iustin Pop
    """
1032 b63ed789 Iustin Pop
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
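    # the result is a 4-tuple: nodes we could not contact, per-node LVM
    # errors, instances with offline LVs, and missing LVs keyed by instance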
1033 2c95a8d4 Iustin Pop
1034 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1035 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1036 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1037 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1038 2c95a8d4 Iustin Pop
1039 2c95a8d4 Iustin Pop
    nv_dict = {}
1040 2c95a8d4 Iustin Pop
    for inst in instances:
1041 2c95a8d4 Iustin Pop
      inst_lvs = {}
1042 2c95a8d4 Iustin Pop
      if (inst.status != "up" or
1043 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1044 2c95a8d4 Iustin Pop
        continue
1045 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1046 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1047 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1048 2c95a8d4 Iustin Pop
        for vol in vol_list:
1049 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1050 2c95a8d4 Iustin Pop
1051 2c95a8d4 Iustin Pop
    if not nv_dict:
1052 2c95a8d4 Iustin Pop
      return result
1053 2c95a8d4 Iustin Pop
1054 72737a7f Iustin Pop
    node_lvs = self.rpc.call_volume_list(nodes, vg_name)
1055 2c95a8d4 Iustin Pop
1056 2c95a8d4 Iustin Pop
    to_act = set()
1057 2c95a8d4 Iustin Pop
    for node in nodes:
1058 2c95a8d4 Iustin Pop
      # node_volume
1059 2c95a8d4 Iustin Pop
      lvs = node_lvs[node]
1060 2c95a8d4 Iustin Pop
1061 b63ed789 Iustin Pop
      if isinstance(lvs, basestring):
1062 9a4f63d1 Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
1063 b63ed789 Iustin Pop
        res_nlvm[node] = lvs
1064 b63ed789 Iustin Pop
      elif not isinstance(lvs, dict):
1065 9a4f63d1 Iustin Pop
        logging.warning("Connection to node %s failed or invalid data"
1066 9a4f63d1 Iustin Pop
                        " returned", node)
1067 2c95a8d4 Iustin Pop
        res_nodes.append(node)
1068 2c95a8d4 Iustin Pop
        continue
1069 2c95a8d4 Iustin Pop
1070 2c95a8d4 Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
1071 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1072 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1073 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1074 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1075 2c95a8d4 Iustin Pop
1076 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1077 b63ed789 Iustin Pop
    # data better
1078 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1079 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1080 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1081 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1082 b63ed789 Iustin Pop
1083 2c95a8d4 Iustin Pop
    return result
1084 2c95a8d4 Iustin Pop
1085 2c95a8d4 Iustin Pop
1086 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1087 07bd8a51 Iustin Pop
  """Rename the cluster.
1088 07bd8a51 Iustin Pop

1089 07bd8a51 Iustin Pop
  """
1090 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1091 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1092 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1093 07bd8a51 Iustin Pop
1094 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1095 07bd8a51 Iustin Pop
    """Build hooks env.
1096 07bd8a51 Iustin Pop

1097 07bd8a51 Iustin Pop
    """
1098 07bd8a51 Iustin Pop
    env = {
1099 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1100 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1101 07bd8a51 Iustin Pop
      }
1102 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1103 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1104 07bd8a51 Iustin Pop
1105 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1106 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1107 07bd8a51 Iustin Pop

1108 07bd8a51 Iustin Pop
    """
1109 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1110 07bd8a51 Iustin Pop
1111 bcf043c9 Iustin Pop
    new_name = hostname.name
1112 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1113 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
1114 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
1115 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1116 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1117 07bd8a51 Iustin Pop
                                 " cluster has changed")
1118 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1119 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1120 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1121 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1122 07bd8a51 Iustin Pop
                                   new_ip)
1123 07bd8a51 Iustin Pop
1124 07bd8a51 Iustin Pop
    self.op.name = new_name
1125 07bd8a51 Iustin Pop
1126 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1127 07bd8a51 Iustin Pop
    """Rename the cluster.
1128 07bd8a51 Iustin Pop

1129 07bd8a51 Iustin Pop
    """
1130 07bd8a51 Iustin Pop
    clustername = self.op.name
1131 07bd8a51 Iustin Pop
    ip = self.ip
1132 07bd8a51 Iustin Pop
1133 07bd8a51 Iustin Pop
    # shutdown the master IP
1134 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
1135 72737a7f Iustin Pop
    if not self.rpc.call_node_stop_master(master, False):
1136 07bd8a51 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
1137 07bd8a51 Iustin Pop
1138 07bd8a51 Iustin Pop
    try:
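      # the finally clause below restarts the master role even if updating
      # or distributing the configuration fails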
1139 07bd8a51 Iustin Pop
      # modify the sstore
1140 d6a02168 Michael Hanselmann
      # TODO: sstore - 'ss' below still uses the old SimpleStore interface
1141 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_MASTER_IP, ip)
1142 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1143 07bd8a51 Iustin Pop
1144 07bd8a51 Iustin Pop
      # Distribute updated ss config to all nodes
1145 07bd8a51 Iustin Pop
      myself = self.cfg.GetNodeInfo(master)
1146 07bd8a51 Iustin Pop
      dist_nodes = self.cfg.GetNodeList()
1147 07bd8a51 Iustin Pop
      if myself.name in dist_nodes:
1148 07bd8a51 Iustin Pop
        dist_nodes.remove(myself.name)
1149 07bd8a51 Iustin Pop
1150 9a4f63d1 Iustin Pop
      logging.debug("Copying updated ssconf data to all nodes")
1151 07bd8a51 Iustin Pop
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1152 07bd8a51 Iustin Pop
        fname = ss.KeyToFilename(keyname)
1153 72737a7f Iustin Pop
        result = self.rpc.call_upload_file(dist_nodes, fname)
1154 07bd8a51 Iustin Pop
        for to_node in dist_nodes:
1155 07bd8a51 Iustin Pop
          if not result[to_node]:
1156 86d9d3bb Iustin Pop
            self.LogWarning("Copy of file %s to node %s failed",
1157 86d9d3bb Iustin Pop
                            fname, to_node)
1158 07bd8a51 Iustin Pop
    finally:
1159 72737a7f Iustin Pop
      if not self.rpc.call_node_start_master(master, False):
1160 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
1161 86d9d3bb Iustin Pop
                        " the master, please restart manually.")
1162 07bd8a51 Iustin Pop
1163 07bd8a51 Iustin Pop
1164 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1165 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1166 8084f9f6 Manuel Franceschini

1167 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
1168 e4376078 Iustin Pop
  @param disk: the disk to check
1169 e4376078 Iustin Pop
  @rtype: boolean
1170 e4376078 Iustin Pop
  @return: boolean indicating whether an LD_LV dev_type was found or not
1171 8084f9f6 Manuel Franceschini

1172 8084f9f6 Manuel Franceschini
  """
1173 8084f9f6 Manuel Franceschini
  if disk.children:
1174 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1175 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1176 8084f9f6 Manuel Franceschini
        return True
1177 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
1178 8084f9f6 Manuel Franceschini
1179 8084f9f6 Manuel Franceschini
1180 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1181 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1182 8084f9f6 Manuel Franceschini

1183 8084f9f6 Manuel Franceschini
  """
1184 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1185 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1186 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1187 c53279cf Guido Trotter
  REQ_BGL = False
1188 c53279cf Guido Trotter
1189 c53279cf Guido Trotter
  def ExpandNames(self):
1190 c53279cf Guido Trotter
    # FIXME: in the future, modifying other cluster parameters might not
1191 c53279cf Guido Trotter
    # require checking on all nodes.
1192 c53279cf Guido Trotter
    self.needed_locks = {
1193 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1194 c53279cf Guido Trotter
    }
1195 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1196 8084f9f6 Manuel Franceschini
1197 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1198 8084f9f6 Manuel Franceschini
    """Build hooks env.
1199 8084f9f6 Manuel Franceschini

1200 8084f9f6 Manuel Franceschini
    """
1201 8084f9f6 Manuel Franceschini
    env = {
1202 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1203 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1204 8084f9f6 Manuel Franceschini
      }
1205 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1206 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1207 8084f9f6 Manuel Franceschini
1208 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1209 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1210 8084f9f6 Manuel Franceschini

1211 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1212 5f83e263 Iustin Pop
    if the given volume group is valid.
1213 8084f9f6 Manuel Franceschini

1214 8084f9f6 Manuel Franceschini
    """
1215 c53279cf Guido Trotter
    # FIXME: This only works because there is only one parameter that can be
1216 c53279cf Guido Trotter
    # changed or removed.
1217 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
1218 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1219 8084f9f6 Manuel Franceschini
      for inst in instances:
1220 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1221 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1222 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1223 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1224 8084f9f6 Manuel Franceschini
1225 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1226 779c15bb Iustin Pop
1227 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1228 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1229 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
1230 8084f9f6 Manuel Franceschini
      for node in node_list:
1231 8d1a2a64 Michael Hanselmann
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
1232 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1233 8084f9f6 Manuel Franceschini
        if vgstatus:
1234 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1235 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1236 8084f9f6 Manuel Franceschini
1237 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
1238 779c15bb Iustin Pop
    # beparams changes do not need validation (we can't validate?),
1239 779c15bb Iustin Pop
    # but we still process here
1240 779c15bb Iustin Pop
    if self.op.beparams:
1241 779c15bb Iustin Pop
      self.new_beparams = cluster.FillDict(
1242 779c15bb Iustin Pop
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
1243 779c15bb Iustin Pop
1244 779c15bb Iustin Pop
    # hypervisor list/parameters
1245 779c15bb Iustin Pop
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
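    # start from a copy of the current cluster-level hvparams; any
    # per-hypervisor dicts given in the opcode are merged on top below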
1246 779c15bb Iustin Pop
    if self.op.hvparams:
1247 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
1248 779c15bb Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1249 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
1250 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
1251 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
1252 779c15bb Iustin Pop
        else:
1253 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
1254 779c15bb Iustin Pop
1255 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1256 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
1257 779c15bb Iustin Pop
    else:
1258 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
1259 779c15bb Iustin Pop
1260 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1261 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
1262 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
1263 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1264 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
1265 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
1266 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
1267 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
1268 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
1269 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
1270 779c15bb Iustin Pop
1271 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1272 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1273 8084f9f6 Manuel Franceschini

1274 8084f9f6 Manuel Franceschini
    """
1275 779c15bb Iustin Pop
    if self.op.vg_name is not None:
1276 779c15bb Iustin Pop
      if self.op.vg_name != self.cfg.GetVGName():
1277 779c15bb Iustin Pop
        self.cfg.SetVGName(self.op.vg_name)
1278 779c15bb Iustin Pop
      else:
1279 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
1280 779c15bb Iustin Pop
                    " state, not changing")
1281 779c15bb Iustin Pop
    if self.op.hvparams:
1282 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
1283 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1284 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1285 779c15bb Iustin Pop
    if self.op.beparams:
1286 779c15bb Iustin Pop
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
1287 779c15bb Iustin Pop
    self.cfg.Update(self.cluster)
1288 8084f9f6 Manuel Franceschini
1289 8084f9f6 Manuel Franceschini
1290 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1291 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1292 a8083063 Iustin Pop

1293 a8083063 Iustin Pop
  """
1294 a8083063 Iustin Pop
  if not instance.disks:
1295 a8083063 Iustin Pop
    return True
1296 a8083063 Iustin Pop
1297 a8083063 Iustin Pop
  if not oneshot:
1298 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1299 a8083063 Iustin Pop
1300 a8083063 Iustin Pop
  node = instance.primary_node
1301 a8083063 Iustin Pop
1302 a8083063 Iustin Pop
  for dev in instance.disks:
1303 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
1304 a8083063 Iustin Pop
1305 a8083063 Iustin Pop
  retries = 0
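  # poll the primary node for mirror status; up to 10 consecutive RPC
  # failures are tolerated before aborting, and between successful polls we
  # sleep for at most 60 seconds, guided by the estimated time remaining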
1306 a8083063 Iustin Pop
  while True:
1307 a8083063 Iustin Pop
    max_time = 0
1308 a8083063 Iustin Pop
    done = True
1309 a8083063 Iustin Pop
    cumul_degraded = False
1310 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1311 a8083063 Iustin Pop
    if not rstats:
1312 86d9d3bb Iustin Pop
      lu.LogWarning("Can't get any data from node %s", node)
1313 a8083063 Iustin Pop
      retries += 1
1314 a8083063 Iustin Pop
      if retries >= 10:
1315 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1316 3ecf6786 Iustin Pop
                                 " aborting." % node)
1317 a8083063 Iustin Pop
      time.sleep(6)
1318 a8083063 Iustin Pop
      continue
1319 a8083063 Iustin Pop
    retries = 0
1320 a8083063 Iustin Pop
    for i in range(len(rstats)):
1321 a8083063 Iustin Pop
      mstat = rstats[i]
1322 a8083063 Iustin Pop
      if mstat is None:
1323 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
1324 86d9d3bb Iustin Pop
                      node, instance.disks[i].iv_name)
1325 a8083063 Iustin Pop
        continue
1326 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1327 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1328 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1329 a8083063 Iustin Pop
      if perc_done is not None:
1330 a8083063 Iustin Pop
        done = False
1331 a8083063 Iustin Pop
        if est_time is not None:
1332 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1333 a8083063 Iustin Pop
          max_time = est_time
1334 a8083063 Iustin Pop
        else:
1335 a8083063 Iustin Pop
          rem_time = "no time estimate"
1336 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1337 b9bddb6b Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
1338 a8083063 Iustin Pop
    if done or oneshot:
1339 a8083063 Iustin Pop
      break
1340 a8083063 Iustin Pop
1341 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1342 a8083063 Iustin Pop
1343 a8083063 Iustin Pop
  if done:
1344 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1345 a8083063 Iustin Pop
  return not cumul_degraded
1346 a8083063 Iustin Pop
1347 a8083063 Iustin Pop
1348 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1349 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1350 a8083063 Iustin Pop

1351 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1352 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1353 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1354 0834c866 Iustin Pop

1355 a8083063 Iustin Pop
  """
1356 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
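  # positions 5 and 6 in the status tuple returned by call_blockdev_find
  # hold the overall is_degraded flag and the ldisk status respectively,
  # hence the index choice below (see the docstring above)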
1357 0834c866 Iustin Pop
  if ldisk:
1358 0834c866 Iustin Pop
    idx = 6
1359 0834c866 Iustin Pop
  else:
1360 0834c866 Iustin Pop
    idx = 5
1361 a8083063 Iustin Pop
1362 a8083063 Iustin Pop
  result = True
1363 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1364 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1365 a8083063 Iustin Pop
    if not rstats:
1366 9a4f63d1 Iustin Pop
      logging.warning("Node %s: disk degraded, not found or node down", node)
1367 a8083063 Iustin Pop
      result = False
1368 a8083063 Iustin Pop
    else:
1369 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1370 a8083063 Iustin Pop
  if dev.children:
1371 a8083063 Iustin Pop
    for child in dev.children:
1372 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1373 a8083063 Iustin Pop
1374 a8083063 Iustin Pop
  return result
1375 a8083063 Iustin Pop
1376 a8083063 Iustin Pop
1377 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1378 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1379 a8083063 Iustin Pop

1380 a8083063 Iustin Pop
  """
1381 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1382 6bf01bbb Guido Trotter
  REQ_BGL = False
1383 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
1384 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")
1385 a8083063 Iustin Pop
1386 6bf01bbb Guido Trotter
  def ExpandNames(self):
1387 1f9430d6 Iustin Pop
    if self.op.names:
1388 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1389 1f9430d6 Iustin Pop
1390 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1391 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1392 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1393 1f9430d6 Iustin Pop
1394 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
1395 6bf01bbb Guido Trotter
    self.needed_locks = {}
1396 6bf01bbb Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1397 e310b019 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1398 6bf01bbb Guido Trotter
1399 6bf01bbb Guido Trotter
  def CheckPrereq(self):
1400 6bf01bbb Guido Trotter
    """Check prerequisites.
1401 6bf01bbb Guido Trotter

1402 6bf01bbb Guido Trotter
    """
1403 6bf01bbb Guido Trotter
1404 1f9430d6 Iustin Pop
  @staticmethod
1405 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1406 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1407 1f9430d6 Iustin Pop

1408 e4376078 Iustin Pop
    @param node_list: a list with the names of all nodes
1409 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
1410 1f9430d6 Iustin Pop

1411 e4376078 Iustin Pop
    @rtype: dict
1412 e4376078 Iustin Pop
    @returns: a dictionary with osnames as keys and as value another map, with
1413 e4376078 Iustin Pop
        nodes as keys and list of OS objects as values, eg::
1414 e4376078 Iustin Pop

1415 e4376078 Iustin Pop
          {"debian-etch": {"node1": [<object>,...],
1416 e4376078 Iustin Pop
                           "node2": [<object>,]}
1417 e4376078 Iustin Pop
          }
1418 1f9430d6 Iustin Pop

1419 1f9430d6 Iustin Pop
    """
1420 1f9430d6 Iustin Pop
    all_os = {}
1421 1f9430d6 Iustin Pop
    for node_name, nr in rlist.iteritems():
1422 1f9430d6 Iustin Pop
      if not nr:
1423 1f9430d6 Iustin Pop
        continue
1424 b4de68a9 Iustin Pop
      for os_obj in nr:
1425 b4de68a9 Iustin Pop
        if os_obj.name not in all_os:
1426 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1427 1f9430d6 Iustin Pop
          # for each node in node_list
1428 b4de68a9 Iustin Pop
          all_os[os_obj.name] = {}
1429 1f9430d6 Iustin Pop
          for nname in node_list:
1430 b4de68a9 Iustin Pop
            all_os[os_obj.name][nname] = []
1431 b4de68a9 Iustin Pop
        all_os[os_obj.name][node_name].append(os_obj)
1432 1f9430d6 Iustin Pop
    return all_os
1433 a8083063 Iustin Pop
1434 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1435 a8083063 Iustin Pop
    """Compute the list of OSes.
1436 a8083063 Iustin Pop

1437 a8083063 Iustin Pop
    """
1438 6bf01bbb Guido Trotter
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1439 72737a7f Iustin Pop
    node_data = self.rpc.call_os_diagnose(node_list)
1440 a8083063 Iustin Pop
    if node_data == False:
1441 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1442 1f9430d6 Iustin Pop
    pol = self._DiagnoseByOS(node_list, node_data)
1443 1f9430d6 Iustin Pop
    output = []
1444 1f9430d6 Iustin Pop
    for os_name, os_data in pol.iteritems():
1445 1f9430d6 Iustin Pop
      row = []
1446 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1447 1f9430d6 Iustin Pop
        if field == "name":
1448 1f9430d6 Iustin Pop
          val = os_name
1449 1f9430d6 Iustin Pop
        elif field == "valid":
1450 1f9430d6 Iustin Pop
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1451 1f9430d6 Iustin Pop
        elif field == "node_status":
1452 1f9430d6 Iustin Pop
          val = {}
1453 1f9430d6 Iustin Pop
          for node_name, nos_list in os_data.iteritems():
1454 1f9430d6 Iustin Pop
            val[node_name] = [(v.status, v.path) for v in nos_list]
1455 1f9430d6 Iustin Pop
        else:
1456 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1457 1f9430d6 Iustin Pop
        row.append(val)
1458 1f9430d6 Iustin Pop
      output.append(row)
1459 1f9430d6 Iustin Pop
1460 1f9430d6 Iustin Pop
    return output
1461 a8083063 Iustin Pop
1462 a8083063 Iustin Pop
1463 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1464 a8083063 Iustin Pop
  """Logical unit for removing a node.
1465 a8083063 Iustin Pop

1466 a8083063 Iustin Pop
  """
1467 a8083063 Iustin Pop
  HPATH = "node-remove"
1468 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1469 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1470 a8083063 Iustin Pop
1471 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1472 a8083063 Iustin Pop
    """Build hooks env.
1473 a8083063 Iustin Pop

1474 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1475 d08869ee Guido Trotter
    node would then be impossible to remove.
1476 a8083063 Iustin Pop

1477 a8083063 Iustin Pop
    """
1478 396e1b78 Michael Hanselmann
    env = {
1479 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1480 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1481 396e1b78 Michael Hanselmann
      }
1482 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1483 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1484 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1485 a8083063 Iustin Pop
1486 a8083063 Iustin Pop
  def CheckPrereq(self):
1487 a8083063 Iustin Pop
    """Check prerequisites.
1488 a8083063 Iustin Pop

1489 a8083063 Iustin Pop
    This checks:
1490 a8083063 Iustin Pop
     - the node exists in the configuration
1491 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1492 a8083063 Iustin Pop
     - it's not the master
1493 a8083063 Iustin Pop

1494 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1495 a8083063 Iustin Pop

1496 a8083063 Iustin Pop
    """
1497 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1498 a8083063 Iustin Pop
    if node is None:
1499 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1500 a8083063 Iustin Pop
1501 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1502 a8083063 Iustin Pop
1503 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
1504 a8083063 Iustin Pop
    if node.name == masternode:
1505 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1506 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1507 a8083063 Iustin Pop
1508 a8083063 Iustin Pop
    for instance_name in instance_list:
1509 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1510 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1511 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1512 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1513 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1514 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1515 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1516 a8083063 Iustin Pop
    self.op.node_name = node.name
1517 a8083063 Iustin Pop
    self.node = node
1518 a8083063 Iustin Pop
1519 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1520 a8083063 Iustin Pop
    """Removes the node from the cluster.
1521 a8083063 Iustin Pop

1522 a8083063 Iustin Pop
    """
1523 a8083063 Iustin Pop
    node = self.node
1524 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
1525 9a4f63d1 Iustin Pop
                 node.name)
1526 a8083063 Iustin Pop
1527 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
1528 a8083063 Iustin Pop
1529 72737a7f Iustin Pop
    self.rpc.call_node_leave_cluster(node.name)
1530 c8a0948f Michael Hanselmann
1531 a8083063 Iustin Pop
1532 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1533 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1534 a8083063 Iustin Pop

1535 a8083063 Iustin Pop
  """
1536 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1537 35705d8f Guido Trotter
  REQ_BGL = False
1538 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
1539 31bf511f Iustin Pop
    "dtotal", "dfree",
1540 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
1541 31bf511f Iustin Pop
    "bootid",
1542 31bf511f Iustin Pop
    "ctotal",
1543 31bf511f Iustin Pop
    )
1544 31bf511f Iustin Pop
1545 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(
1546 31bf511f Iustin Pop
    "name", "pinst_cnt", "sinst_cnt",
1547 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
1548 31bf511f Iustin Pop
    "pip", "sip", "tags",
1549 31bf511f Iustin Pop
    "serial_no",
1550 0e67cdbe Iustin Pop
    "master_candidate",
1551 0e67cdbe Iustin Pop
    "master",
1552 31bf511f Iustin Pop
    )
1553 a8083063 Iustin Pop
1554 35705d8f Guido Trotter
  def ExpandNames(self):
1555 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1556 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1557 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1558 a8083063 Iustin Pop
1559 35705d8f Guido Trotter
    self.needed_locks = {}
1560 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1561 c8d8b4c8 Iustin Pop
1562 c8d8b4c8 Iustin Pop
    if self.op.names:
1563 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
1564 35705d8f Guido Trotter
    else:
1565 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
1566 c8d8b4c8 Iustin Pop
1567 31bf511f Iustin Pop
    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
1568 c8d8b4c8 Iustin Pop
    if self.do_locking:
1569 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
1570 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
1571 c8d8b4c8 Iustin Pop
1572 35705d8f Guido Trotter
1573 35705d8f Guido Trotter
  def CheckPrereq(self):
1574 35705d8f Guido Trotter
    """Check prerequisites.
1575 35705d8f Guido Trotter

1576 35705d8f Guido Trotter
    """
1577 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in _GetWantedNodes,
1578 c8d8b4c8 Iustin Pop
    # if non-empty; if it is empty, there is no validation to do
1579 c8d8b4c8 Iustin Pop
    pass
1580 a8083063 Iustin Pop
1581 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1582 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1583 a8083063 Iustin Pop

1584 a8083063 Iustin Pop
    """
1585 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
1586 c8d8b4c8 Iustin Pop
    if self.do_locking:
1587 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
1588 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
1589 3fa93523 Guido Trotter
      nodenames = self.wanted
1590 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
1591 3fa93523 Guido Trotter
      if missing:
1592 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
1593 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
1594 c8d8b4c8 Iustin Pop
    else:
1595 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
1596 c1f1cbb2 Iustin Pop
1597 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
1598 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
1599 a8083063 Iustin Pop
1600 a8083063 Iustin Pop
    # begin data gathering
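    # live (RPC-gathered) data is collected only when we hold node locks;
    # for purely static fields we answer from the configuration alone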
1601 a8083063 Iustin Pop
1602 31bf511f Iustin Pop
    if self.do_locking:
1603 a8083063 Iustin Pop
      live_data = {}
1604 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
1605 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
1606 a8083063 Iustin Pop
      for name in nodenames:
1607 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1608 a8083063 Iustin Pop
        if nodeinfo:
1609 d599d686 Iustin Pop
          fn = utils.TryConvert
1610 a8083063 Iustin Pop
          live_data[name] = {
1611 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
1612 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
1613 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
1614 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
1615 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
1616 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
1617 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
1618 a8083063 Iustin Pop
            }
1619 a8083063 Iustin Pop
        else:
1620 a8083063 Iustin Pop
          live_data[name] = {}
1621 a8083063 Iustin Pop
    else:
1622 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1623 a8083063 Iustin Pop
1624 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1625 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
1626 a8083063 Iustin Pop
1627 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1628 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1629 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1630 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1631 a8083063 Iustin Pop
1632 ec223efb Iustin Pop
      for instance_name in instancelist:
1633 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1634 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1635 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1636 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1637 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1638 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1639 a8083063 Iustin Pop
1640 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
1641 0e67cdbe Iustin Pop
1642 a8083063 Iustin Pop
    # end data gathering
1643 a8083063 Iustin Pop
1644 a8083063 Iustin Pop
    output = []
1645 a8083063 Iustin Pop
    for node in nodelist:
1646 a8083063 Iustin Pop
      node_output = []
1647 a8083063 Iustin Pop
      for field in self.op.output_fields:
1648 a8083063 Iustin Pop
        if field == "name":
1649 a8083063 Iustin Pop
          val = node.name
1650 ec223efb Iustin Pop
        elif field == "pinst_list":
1651 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1652 ec223efb Iustin Pop
        elif field == "sinst_list":
1653 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1654 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1655 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1656 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1657 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1658 a8083063 Iustin Pop
        elif field == "pip":
1659 a8083063 Iustin Pop
          val = node.primary_ip
1660 a8083063 Iustin Pop
        elif field == "sip":
1661 a8083063 Iustin Pop
          val = node.secondary_ip
1662 130a6a6f Iustin Pop
        elif field == "tags":
1663 130a6a6f Iustin Pop
          val = list(node.GetTags())
1664 38d7239a Iustin Pop
        elif field == "serial_no":
1665 38d7239a Iustin Pop
          val = node.serial_no
1666 0e67cdbe Iustin Pop
        elif field == "master_candidate":
1667 0e67cdbe Iustin Pop
          val = node.master_candidate
1668 0e67cdbe Iustin Pop
        elif field == "master":
1669 0e67cdbe Iustin Pop
          val = node.name == master_node
1670 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
1671 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1672 a8083063 Iustin Pop
        else:
1673 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1674 a8083063 Iustin Pop
        node_output.append(val)
1675 a8083063 Iustin Pop
      output.append(node_output)
1676 a8083063 Iustin Pop
1677 a8083063 Iustin Pop
    return output
1678 a8083063 Iustin Pop
1679 a8083063 Iustin Pop
1680 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1681 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1682 dcb93971 Michael Hanselmann

1683 dcb93971 Michael Hanselmann
  """
1684 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1685 21a15682 Guido Trotter
  REQ_BGL = False
1686 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
1687 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
1688 21a15682 Guido Trotter
1689 21a15682 Guido Trotter
  def ExpandNames(self):
1690 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1691 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1692 21a15682 Guido Trotter
                       selected=self.op.output_fields)
1693 21a15682 Guido Trotter
1694 21a15682 Guido Trotter
    self.needed_locks = {}
1695 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1696 21a15682 Guido Trotter
    if not self.op.nodes:
1697 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1698 21a15682 Guido Trotter
    else:
1699 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
1700 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
1701 dcb93971 Michael Hanselmann
1702 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1703 dcb93971 Michael Hanselmann
    """Check prerequisites.
1704 dcb93971 Michael Hanselmann

1705 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1706 dcb93971 Michael Hanselmann

1707 dcb93971 Michael Hanselmann
    """
1708 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
1709 dcb93971 Michael Hanselmann
1710 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1711 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1712 dcb93971 Michael Hanselmann

1713 dcb93971 Michael Hanselmann
    """
1714 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1715 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
1716 dcb93971 Michael Hanselmann
1717 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1718 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1719 dcb93971 Michael Hanselmann
1720 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1721 dcb93971 Michael Hanselmann
1722 dcb93971 Michael Hanselmann
    output = []
1723 dcb93971 Michael Hanselmann
    for node in nodenames:
1724 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1725 37d19eb2 Michael Hanselmann
        continue
1726 37d19eb2 Michael Hanselmann
1727 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1728 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1729 dcb93971 Michael Hanselmann
1730 dcb93971 Michael Hanselmann
      for vol in node_vols:
1731 dcb93971 Michael Hanselmann
        node_output = []
1732 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1733 dcb93971 Michael Hanselmann
          if field == "node":
1734 dcb93971 Michael Hanselmann
            val = node
1735 dcb93971 Michael Hanselmann
          elif field == "phys":
1736 dcb93971 Michael Hanselmann
            val = vol['dev']
1737 dcb93971 Michael Hanselmann
          elif field == "vg":
1738 dcb93971 Michael Hanselmann
            val = vol['vg']
1739 dcb93971 Michael Hanselmann
          elif field == "name":
1740 dcb93971 Michael Hanselmann
            val = vol['name']
1741 dcb93971 Michael Hanselmann
          elif field == "size":
1742 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1743 dcb93971 Michael Hanselmann
          elif field == "instance":
1744 dcb93971 Michael Hanselmann
            for inst in ilist:
1745 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1746 dcb93971 Michael Hanselmann
                continue
1747 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1748 dcb93971 Michael Hanselmann
                val = inst.name
1749 dcb93971 Michael Hanselmann
                break
1750 dcb93971 Michael Hanselmann
            else:
1751 dcb93971 Michael Hanselmann
              val = '-'
1752 dcb93971 Michael Hanselmann
          else:
1753 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1754 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1755 dcb93971 Michael Hanselmann
1756 dcb93971 Michael Hanselmann
        output.append(node_output)
1757 dcb93971 Michael Hanselmann
1758 dcb93971 Michael Hanselmann
    return output
1759 dcb93971 Michael Hanselmann
1760 dcb93971 Michael Hanselmann
1761 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1762 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1763 a8083063 Iustin Pop

1764 a8083063 Iustin Pop
  """
1765 a8083063 Iustin Pop
  HPATH = "node-add"
1766 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1767 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1768 a8083063 Iustin Pop
1769 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1770 a8083063 Iustin Pop
    """Build hooks env.
1771 a8083063 Iustin Pop

1772 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1773 a8083063 Iustin Pop

1774 a8083063 Iustin Pop
    """
1775 a8083063 Iustin Pop
    env = {
1776 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1777 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1778 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1779 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1780 a8083063 Iustin Pop
      }
1781 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1782 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1783 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1784 a8083063 Iustin Pop
1785 a8083063 Iustin Pop
  def CheckPrereq(self):
1786 a8083063 Iustin Pop
    """Check prerequisites.
1787 a8083063 Iustin Pop

1788 a8083063 Iustin Pop
    This checks:
1789 a8083063 Iustin Pop
     - the new node is not already in the config
1790 a8083063 Iustin Pop
     - it is resolvable
1791 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
1792 a8083063 Iustin Pop

1793 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1794 a8083063 Iustin Pop

1795 a8083063 Iustin Pop
    """
1796 a8083063 Iustin Pop
    node_name = self.op.node_name
1797 a8083063 Iustin Pop
    cfg = self.cfg
1798 a8083063 Iustin Pop
1799 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1800 a8083063 Iustin Pop
1801 bcf043c9 Iustin Pop
    node = dns_data.name
1802 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1803 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1804 a8083063 Iustin Pop
    if secondary_ip is None:
1805 a8083063 Iustin Pop
      secondary_ip = primary_ip
1806 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1807 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1808 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1809 e7c6e02b Michael Hanselmann
1810 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1811 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
1812 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1813 e7c6e02b Michael Hanselmann
                                 node)
1814 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
1815 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1816 a8083063 Iustin Pop
1817 a8083063 Iustin Pop
    for existing_node_name in node_list:
1818 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1819 e7c6e02b Michael Hanselmann
1820 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
1821 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
1822 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
1823 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1824 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
1825 e7c6e02b Michael Hanselmann
        continue
1826 e7c6e02b Michael Hanselmann
1827 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1828 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1829 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1830 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1831 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1832 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1833 a8083063 Iustin Pop
1834 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1835 a8083063 Iustin Pop
    # same as for the master
1836 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
1837 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1838 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1839 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1840 a8083063 Iustin Pop
      if master_singlehomed:
1841 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1842 3ecf6786 Iustin Pop
                                   " new node has one")
1843 a8083063 Iustin Pop
      else:
1844 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1845 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1846 a8083063 Iustin Pop
1847 a8083063 Iustin Pop
    # check reachability
1848 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1849 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1850 a8083063 Iustin Pop
1851 a8083063 Iustin Pop
    if not newbie_singlehomed:
1852 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1853 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1854 b15d625f Iustin Pop
                           source=myself.secondary_ip):
1855 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1856 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
1857 a8083063 Iustin Pop
1858 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1859 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1860 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1861 a8083063 Iustin Pop
1862 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1863 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1864 a8083063 Iustin Pop

1865 a8083063 Iustin Pop
    """
1866 a8083063 Iustin Pop
    new_node = self.new_node
1867 a8083063 Iustin Pop
    node = new_node.name
1868 a8083063 Iustin Pop
1869 a8083063 Iustin Pop
    # check connectivity
1870 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
1871 a8083063 Iustin Pop
    if result:
1872 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1873 9a4f63d1 Iustin Pop
        logging.info("Communication to node %s fine, sw version %s match",
1874 9a4f63d1 Iustin Pop
                     node, result)
1875 a8083063 Iustin Pop
      else:
1876 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1877 3ecf6786 Iustin Pop
                                 " node version %s" %
1878 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1879 a8083063 Iustin Pop
    else:
1880 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1881 a8083063 Iustin Pop
1882 a8083063 Iustin Pop
    # setup ssh on node
1883 9a4f63d1 Iustin Pop
    logging.info("Copy ssh key to node %s", node)
1884 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1885 a8083063 Iustin Pop
    keyarray = []
1886 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1887 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1888 70d9e3d8 Iustin Pop
                priv_key, pub_key]
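    # the files are read in this order because their contents are passed
    # positionally to call_node_add below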
1889 a8083063 Iustin Pop
1890 a8083063 Iustin Pop
    for i in keyfiles:
1891 a8083063 Iustin Pop
      f = open(i, 'r')
1892 a8083063 Iustin Pop
      try:
1893 a8083063 Iustin Pop
        keyarray.append(f.read())
1894 a8083063 Iustin Pop
      finally:
1895 a8083063 Iustin Pop
        f.close()
1896 a8083063 Iustin Pop
1897 72737a7f Iustin Pop
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
1898 72737a7f Iustin Pop
                                    keyarray[2],
1899 72737a7f Iustin Pop
                                    keyarray[3], keyarray[4], keyarray[5])
1900 a8083063 Iustin Pop
1901 a8083063 Iustin Pop
    if not result:
1902 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1903 a8083063 Iustin Pop
1904 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1905 d9c02ca6 Michael Hanselmann
    utils.AddHostToEtcHosts(new_node.name)
1906 c8a0948f Michael Hanselmann
1907 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1908 caad16e2 Iustin Pop
      if not self.rpc.call_node_has_ip_address(new_node.name,
1909 caad16e2 Iustin Pop
                                               new_node.secondary_ip):
1910 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1911 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
1912 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
1913 a8083063 Iustin Pop
1914 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
1915 5c0527ed Guido Trotter
    node_verify_param = {
1916 5c0527ed Guido Trotter
      'nodelist': [node],
1917 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
1918 5c0527ed Guido Trotter
    }
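    # call_node_verify returns a dict indexed by the verifying node; for
    # the 'nodelist' check each entry is expected to map the nodes that
    # could not be reached over ssh to the corresponding error message,
    # which is how the result is interpreted in the loop below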
1919 5c0527ed Guido Trotter
1920 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
1921 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
1922 5c0527ed Guido Trotter
    for verifier in node_verify_list:
1923 5c0527ed Guido Trotter
      if not result[verifier]:
1924 5c0527ed Guido Trotter
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
1925 5c0527ed Guido Trotter
                                 " for remote verification" % verifier)
1926 5c0527ed Guido Trotter
      if result[verifier]['nodelist']:
1927 5c0527ed Guido Trotter
        for failed in result[verifier]['nodelist']:
1928 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
1929 5c0527ed Guido Trotter
                      (verifier, result[verifier]['nodelist'][failed]))
1930 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
1931 ff98055b Iustin Pop
1932 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1933 a8083063 Iustin Pop
    # including the node just added
1934 d6a02168 Michael Hanselmann
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
1935 102b115b Michael Hanselmann
    dist_nodes = self.cfg.GetNodeList()
1936 102b115b Michael Hanselmann
    if not self.op.readd:
1937 102b115b Michael Hanselmann
      dist_nodes.append(node)
1938 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1939 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1940 a8083063 Iustin Pop
1941 9a4f63d1 Iustin Pop
    logging.debug("Copying hosts and known_hosts to all nodes")
1942 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1943 72737a7f Iustin Pop
      result = self.rpc.call_upload_file(dist_nodes, fname)
1944 a8083063 Iustin Pop
      for to_node in dist_nodes:
1945 a8083063 Iustin Pop
        if not result[to_node]:
1946 9a4f63d1 Iustin Pop
          logging.error("Copy of file %s to node %s failed", fname, to_node)
1947 a8083063 Iustin Pop
1948 d6a02168 Michael Hanselmann
    to_copy = []
1949 00cd937c Iustin Pop
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
1950 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
1951 a8083063 Iustin Pop
    for fname in to_copy:
1952 72737a7f Iustin Pop
      result = self.rpc.call_upload_file([node], fname)
1953 b5602d15 Guido Trotter
      if not result[node]:
1954 9a4f63d1 Iustin Pop
        logging.error("Could not copy file %s to node %s", fname, node)
1955 a8083063 Iustin Pop
1956 d8470559 Michael Hanselmann
    if self.op.readd:
1957 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
1958 d8470559 Michael Hanselmann
    else:
1959 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
1960 a8083063 Iustin Pop
1961 a8083063 Iustin Pop
1962 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1963 a8083063 Iustin Pop
  """Query cluster configuration.
1964 a8083063 Iustin Pop

1965 a8083063 Iustin Pop
  """
1966 a8083063 Iustin Pop
  _OP_REQP = []
1967 642339cf Guido Trotter
  REQ_BGL = False
1968 642339cf Guido Trotter
1969 642339cf Guido Trotter
  def ExpandNames(self):
1970 642339cf Guido Trotter
    self.needed_locks = {}
1971 a8083063 Iustin Pop
1972 a8083063 Iustin Pop
  def CheckPrereq(self):
1973 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1974 a8083063 Iustin Pop

1975 a8083063 Iustin Pop
    """
1976 a8083063 Iustin Pop
    pass
1977 a8083063 Iustin Pop
1978 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1979 a8083063 Iustin Pop
    """Return cluster config.
1980 a8083063 Iustin Pop

1981 a8083063 Iustin Pop
    """
1982 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1983 a8083063 Iustin Pop
    result = {
1984 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1985 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1986 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1987 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1988 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1989 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1990 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
1991 469f88e1 Iustin Pop
      "master": cluster.master_node,
1992 02691904 Alexander Schreiber
      "default_hypervisor": cluster.default_hypervisor,
1993 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
1994 469f88e1 Iustin Pop
      "hvparams": cluster.hvparams,
1995 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
1996 a8083063 Iustin Pop
      }
1997 a8083063 Iustin Pop
1998 a8083063 Iustin Pop
    return result
1999 a8083063 Iustin Pop
2000 a8083063 Iustin Pop
2001 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
2002 ae5849b5 Michael Hanselmann
  """Return configuration values.
2003 a8083063 Iustin Pop

2004 a8083063 Iustin Pop
  """
2005 a8083063 Iustin Pop
  _OP_REQP = []
2006 642339cf Guido Trotter
  REQ_BGL = False
2007 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
2008 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")
2009 642339cf Guido Trotter
2010 642339cf Guido Trotter
  def ExpandNames(self):
2011 642339cf Guido Trotter
    self.needed_locks = {}
2012 a8083063 Iustin Pop
2013 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2014 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2015 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
2016 ae5849b5 Michael Hanselmann
2017 a8083063 Iustin Pop
  def CheckPrereq(self):
2018 a8083063 Iustin Pop
    """No prerequisites.
2019 a8083063 Iustin Pop

2020 a8083063 Iustin Pop
    """
2021 a8083063 Iustin Pop
    pass
2022 a8083063 Iustin Pop
2023 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2024 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
2025 a8083063 Iustin Pop

2026 a8083063 Iustin Pop
    """
2027 ae5849b5 Michael Hanselmann
    values = []
2028 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
2029 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
2030 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
2031 ae5849b5 Michael Hanselmann
      elif field == "master_node":
2032 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
2033 3ccafd0e Iustin Pop
      elif field == "drain_flag":
2034 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2035 ae5849b5 Michael Hanselmann
      else:
2036 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
2037 3ccafd0e Iustin Pop
      values.append(entry)
2038 ae5849b5 Michael Hanselmann
    return values
2039 a8083063 Iustin Pop
2040 a8083063 Iustin Pop
2041 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
2042 a8083063 Iustin Pop
  """Bring up an instance's disks.
2043 a8083063 Iustin Pop

2044 a8083063 Iustin Pop
  """
2045 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2046 f22a8ba3 Guido Trotter
  REQ_BGL = False
2047 f22a8ba3 Guido Trotter
2048 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2049 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2050 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2051 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2052 f22a8ba3 Guido Trotter
2053 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2054 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2055 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2056 a8083063 Iustin Pop
2057 a8083063 Iustin Pop
  def CheckPrereq(self):
2058 a8083063 Iustin Pop
    """Check prerequisites.
2059 a8083063 Iustin Pop

2060 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2061 a8083063 Iustin Pop

2062 a8083063 Iustin Pop
    """
2063 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2064 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2065 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2066 a8083063 Iustin Pop
2067 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2068 a8083063 Iustin Pop
    """Activate the disks.
2069 a8083063 Iustin Pop

2070 a8083063 Iustin Pop
    """
2071 b9bddb6b Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2072 a8083063 Iustin Pop
    if not disks_ok:
2073 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
2074 a8083063 Iustin Pop
2075 a8083063 Iustin Pop
    return disks_info
2076 a8083063 Iustin Pop
2077 a8083063 Iustin Pop
2078 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2079 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
2080 a8083063 Iustin Pop

2081 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
2082 a8083063 Iustin Pop

2083 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
2084 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
2085 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
2086 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
2087 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
2088 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
2089 e4376078 Iustin Pop
      won't result in an error return from the function
2090 e4376078 Iustin Pop
  @return: a tuple of (status, device_info); status is True if all disks
      could be assembled, and device_info is a list of
2091 e4376078 Iustin Pop
      (host, instance_visible_name, node_visible_name)
2092 e4376078 Iustin Pop
      with the mapping from node devices to instance devices
2093 a8083063 Iustin Pop

2094 a8083063 Iustin Pop
  """
2095 a8083063 Iustin Pop
  device_info = []
2096 a8083063 Iustin Pop
  disks_ok = True
2097 fdbd668d Iustin Pop
  iname = instance.name
2098 fdbd668d Iustin Pop
  # With the two-pass mechanism we try to reduce the window of
2099 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
2100 fdbd668d Iustin Pop
  # before handshaking has occurred, but we do not eliminate it
2101 fdbd668d Iustin Pop
2102 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
2103 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
2104 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
2105 fdbd668d Iustin Pop
  # SyncSource, etc.)
2106 fdbd668d Iustin Pop
2107 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
2108 a8083063 Iustin Pop
  for inst_disk in instance.disks:
2109 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2110 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2111 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2112 a8083063 Iustin Pop
      if not result:
2113 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2114 86d9d3bb Iustin Pop
                           " (is_primary=False, pass=1)",
2115 86d9d3bb Iustin Pop
                           inst_disk.iv_name, node)
2116 fdbd668d Iustin Pop
        if not ignore_secondaries:
2117 a8083063 Iustin Pop
          disks_ok = False
2118 fdbd668d Iustin Pop
2119 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
2120 fdbd668d Iustin Pop
2121 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
2122 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
2123 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2124 fdbd668d Iustin Pop
      if node != instance.primary_node:
2125 fdbd668d Iustin Pop
        continue
2126 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2127 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2128 fdbd668d Iustin Pop
      if not result:
2129 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2130 86d9d3bb Iustin Pop
                           " (is_primary=True, pass=2)",
2131 86d9d3bb Iustin Pop
                           inst_disk.iv_name, node)
2132 fdbd668d Iustin Pop
        disks_ok = False
2133 fdbd668d Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
2134 a8083063 Iustin Pop
2135 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
2136 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
2137 b352ab5b Iustin Pop
  # improving the logical/physical id handling
2138 b352ab5b Iustin Pop
  for disk in instance.disks:
2139 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
2140 b352ab5b Iustin Pop
2141 a8083063 Iustin Pop
  return disks_ok, device_info
2142 a8083063 Iustin Pop
2143 a8083063 Iustin Pop
2144 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
2145 3ecf6786 Iustin Pop
  """Start the disks of an instance.
2146 3ecf6786 Iustin Pop

2147 3ecf6786 Iustin Pop
  """
2148 b9bddb6b Iustin Pop
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
2149 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
2150 fe7b0351 Michael Hanselmann
  if not disks_ok:
2151 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
2152 fe7b0351 Michael Hanselmann
    if force is not None and not force:
2153 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
2154 86d9d3bb Iustin Pop
                         " secondary node,"
2155 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
2156 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
2157 fe7b0351 Michael Hanselmann
2158 fe7b0351 Michael Hanselmann
2159 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
2160 a8083063 Iustin Pop
  """Shutdown an instance's disks.
2161 a8083063 Iustin Pop

2162 a8083063 Iustin Pop
  """
2163 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2164 f22a8ba3 Guido Trotter
  REQ_BGL = False
2165 f22a8ba3 Guido Trotter
2166 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2167 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2168 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2169 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2170 f22a8ba3 Guido Trotter
2171 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2172 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2173 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2174 a8083063 Iustin Pop
2175 a8083063 Iustin Pop
  def CheckPrereq(self):
2176 a8083063 Iustin Pop
    """Check prerequisites.
2177 a8083063 Iustin Pop

2178 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2179 a8083063 Iustin Pop

2180 a8083063 Iustin Pop
    """
2181 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2182 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2183 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2184 a8083063 Iustin Pop
2185 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2186 a8083063 Iustin Pop
    """Deactivate the disks
2187 a8083063 Iustin Pop

2188 a8083063 Iustin Pop
    """
2189 a8083063 Iustin Pop
    instance = self.instance
2190 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
2191 a8083063 Iustin Pop
2192 a8083063 Iustin Pop
2193 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
2194 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
2195 155d6c75 Guido Trotter

2196 155d6c75 Guido Trotter
  This function checks if an instance is running before calling
2197 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
2198 155d6c75 Guido Trotter

2199 155d6c75 Guido Trotter
  """
2200 72737a7f Iustin Pop
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
2201 72737a7f Iustin Pop
                                      [instance.hypervisor])
2202 155d6c75 Guido Trotter
  ins_l = ins_l[instance.primary_node]
2203 155d6c75 Guido Trotter
  if not type(ins_l) is list:
2204 155d6c75 Guido Trotter
    raise errors.OpExecError("Can't contact node '%s'" %
2205 155d6c75 Guido Trotter
                             instance.primary_node)
2206 155d6c75 Guido Trotter
2207 155d6c75 Guido Trotter
  if instance.name in ins_l:
2208 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
2209 155d6c75 Guido Trotter
                             " block devices.")
2210 155d6c75 Guido Trotter
2211 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
2212 a8083063 Iustin Pop
2213 a8083063 Iustin Pop
2214 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2215 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2216 a8083063 Iustin Pop

2217 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2218 a8083063 Iustin Pop

2219 a8083063 Iustin Pop
  If ignore_primary is false, errors on the primary node are not
2220 a8083063 Iustin Pop
  ignored.
2221 a8083063 Iustin Pop

2222 a8083063 Iustin Pop
  """
2223 a8083063 Iustin Pop
  result = True
2224 a8083063 Iustin Pop
  for disk in instance.disks:
2225 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2226 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
2227 72737a7f Iustin Pop
      if not lu.rpc.call_blockdev_shutdown(node, top_disk):
2228 9a4f63d1 Iustin Pop
        logging.error("Could not shutdown block device %s on node %s",
2229 9a4f63d1 Iustin Pop
                      disk.iv_name, node)
2230 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2231 a8083063 Iustin Pop
          result = False
2232 a8083063 Iustin Pop
  return result
2233 a8083063 Iustin Pop
2234 a8083063 Iustin Pop
2235 b9bddb6b Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
2236 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2237 d4f16fd9 Iustin Pop

2238 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
2239 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2240 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
2241 d4f16fd9 Iustin Pop
  exception.
2242 d4f16fd9 Iustin Pop

2243 b9bddb6b Iustin Pop
  @type lu: L{LogicalUnit}
2244 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2245 e69d05fd Iustin Pop
  @type node: C{str}
2246 e69d05fd Iustin Pop
  @param node: the node to check
2247 e69d05fd Iustin Pop
  @type reason: C{str}
2248 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2249 e69d05fd Iustin Pop
  @type requested: C{int}
2250 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2251 e69d05fd Iustin Pop
  @type hypervisor: C{str}
2252 e69d05fd Iustin Pop
  @param hypervisor: the hypervisor to ask for memory stats
2253 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2254 e69d05fd Iustin Pop
      we cannot check the node
2255 d4f16fd9 Iustin Pop

2256 d4f16fd9 Iustin Pop
  """
2257 72737a7f Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
2258 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2259 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2260 d4f16fd9 Iustin Pop
                             " information" % (node,))
2261 d4f16fd9 Iustin Pop
2262 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2263 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2264 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2265 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2266 d4f16fd9 Iustin Pop
  if requested > free_mem:
2267 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2268 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2269 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2270 d4f16fd9 Iustin Pop
2271 d4f16fd9 Iustin Pop
2272 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2273 a8083063 Iustin Pop
  """Starts an instance.
2274 a8083063 Iustin Pop

2275 a8083063 Iustin Pop
  """
2276 a8083063 Iustin Pop
  HPATH = "instance-start"
2277 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2278 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2279 e873317a Guido Trotter
  REQ_BGL = False
2280 e873317a Guido Trotter
2281 e873317a Guido Trotter
  def ExpandNames(self):
2282 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2283 a8083063 Iustin Pop
2284 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2285 a8083063 Iustin Pop
    """Build hooks env.
2286 a8083063 Iustin Pop

2287 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2288 a8083063 Iustin Pop

2289 a8083063 Iustin Pop
    """
2290 a8083063 Iustin Pop
    env = {
2291 a8083063 Iustin Pop
      "FORCE": self.op.force,
2292 a8083063 Iustin Pop
      }
2293 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2294 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2295 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2296 a8083063 Iustin Pop
    return env, nl, nl
2297 a8083063 Iustin Pop
2298 a8083063 Iustin Pop
  def CheckPrereq(self):
2299 a8083063 Iustin Pop
    """Check prerequisites.
2300 a8083063 Iustin Pop

2301 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2302 a8083063 Iustin Pop

2303 a8083063 Iustin Pop
    """
2304 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2305 e873317a Guido Trotter
    assert self.instance is not None, \
2306 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2307 a8083063 Iustin Pop
2308 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
2309 a8083063 Iustin Pop
    # check bridges existence
2310 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
2311 a8083063 Iustin Pop
2312 b9bddb6b Iustin Pop
    _CheckNodeFreeMemory(self, instance.primary_node,
2313 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
2314 338e51e8 Iustin Pop
                         bep[constants.BE_MEMORY], instance.hypervisor)
2315 d4f16fd9 Iustin Pop
2316 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2317 a8083063 Iustin Pop
    """Start the instance.
2318 a8083063 Iustin Pop

2319 a8083063 Iustin Pop
    """
2320 a8083063 Iustin Pop
    instance = self.instance
2321 a8083063 Iustin Pop
    force = self.op.force
2322 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
2323 a8083063 Iustin Pop
2324 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
2325 fe482621 Iustin Pop
2326 a8083063 Iustin Pop
    node_current = instance.primary_node
2327 a8083063 Iustin Pop
2328 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
2329 a8083063 Iustin Pop
2330 72737a7f Iustin Pop
    if not self.rpc.call_instance_start(node_current, instance, extra_args):
2331 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
2332 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2333 a8083063 Iustin Pop
2334 a8083063 Iustin Pop
2335 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2336 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2337 bf6929a2 Alexander Schreiber

2338 bf6929a2 Alexander Schreiber
  """
2339 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2340 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2341 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2342 e873317a Guido Trotter
  REQ_BGL = False
2343 e873317a Guido Trotter
2344 e873317a Guido Trotter
  def ExpandNames(self):
2345 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2346 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2347 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
2348 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2349 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
2350 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2351 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
2352 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2353 bf6929a2 Alexander Schreiber
2354 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2355 bf6929a2 Alexander Schreiber
    """Build hooks env.
2356 bf6929a2 Alexander Schreiber

2357 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2358 bf6929a2 Alexander Schreiber

2359 bf6929a2 Alexander Schreiber
    """
2360 bf6929a2 Alexander Schreiber
    env = {
2361 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2362 bf6929a2 Alexander Schreiber
      }
2363 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2364 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2365 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2366 bf6929a2 Alexander Schreiber
    return env, nl, nl
2367 bf6929a2 Alexander Schreiber
2368 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2369 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2370 bf6929a2 Alexander Schreiber

2371 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2372 bf6929a2 Alexander Schreiber

2373 bf6929a2 Alexander Schreiber
    """
2374 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2375 e873317a Guido Trotter
    assert self.instance is not None, \
2376 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2377 bf6929a2 Alexander Schreiber
2378 bf6929a2 Alexander Schreiber
    # check bridges existence
2379 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
2380 bf6929a2 Alexander Schreiber
2381 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2382 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2383 bf6929a2 Alexander Schreiber

2384 bf6929a2 Alexander Schreiber
    """
2385 bf6929a2 Alexander Schreiber
    instance = self.instance
2386 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2387 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2388 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2389 bf6929a2 Alexander Schreiber
2390 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
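    # soft and hard reboots are delegated to the hypervisor via
    # call_instance_reboot; a full reboot is emulated below by shutting
    # down the instance and its disks and then starting them again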
2391 bf6929a2 Alexander Schreiber
2392 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2393 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2394 72737a7f Iustin Pop
      if not self.rpc.call_instance_reboot(node_current, instance,
2395 72737a7f Iustin Pop
                                           reboot_type, extra_args):
2396 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2397 bf6929a2 Alexander Schreiber
    else:
2398 72737a7f Iustin Pop
      if not self.rpc.call_instance_shutdown(node_current, instance):
2399 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2400 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
2401 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
2402 72737a7f Iustin Pop
      if not self.rpc.call_instance_start(node_current, instance, extra_args):
2403 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
2404 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2405 bf6929a2 Alexander Schreiber
2406 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
2407 bf6929a2 Alexander Schreiber
2408 bf6929a2 Alexander Schreiber
2409 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2410 a8083063 Iustin Pop
  """Shutdown an instance.
2411 a8083063 Iustin Pop

2412 a8083063 Iustin Pop
  """
2413 a8083063 Iustin Pop
  HPATH = "instance-stop"
2414 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2415 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2416 e873317a Guido Trotter
  REQ_BGL = False
2417 e873317a Guido Trotter
2418 e873317a Guido Trotter
  def ExpandNames(self):
2419 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2420 a8083063 Iustin Pop
2421 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2422 a8083063 Iustin Pop
    """Build hooks env.
2423 a8083063 Iustin Pop

2424 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2425 a8083063 Iustin Pop

2426 a8083063 Iustin Pop
    """
2427 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2428 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2429 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2430 a8083063 Iustin Pop
    return env, nl, nl
2431 a8083063 Iustin Pop
2432 a8083063 Iustin Pop
  def CheckPrereq(self):
2433 a8083063 Iustin Pop
    """Check prerequisites.
2434 a8083063 Iustin Pop

2435 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2436 a8083063 Iustin Pop

2437 a8083063 Iustin Pop
    """
2438 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2439 e873317a Guido Trotter
    assert self.instance is not None, \
2440 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2441 a8083063 Iustin Pop
2442 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2443 a8083063 Iustin Pop
    """Shutdown the instance.
2444 a8083063 Iustin Pop

2445 a8083063 Iustin Pop
    """
2446 a8083063 Iustin Pop
    instance = self.instance
2447 a8083063 Iustin Pop
    node_current = instance.primary_node
2448 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2449 72737a7f Iustin Pop
    if not self.rpc.call_instance_shutdown(node_current, instance):
2450 86d9d3bb Iustin Pop
      self.proc.LogWarning("Could not shutdown instance")
2451 a8083063 Iustin Pop
2452 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
2453 a8083063 Iustin Pop
2454 a8083063 Iustin Pop
2455 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2456 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2457 fe7b0351 Michael Hanselmann

2458 fe7b0351 Michael Hanselmann
  """
2459 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2460 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2461 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2462 4e0b4d2d Guido Trotter
  REQ_BGL = False
2463 4e0b4d2d Guido Trotter
2464 4e0b4d2d Guido Trotter
  def ExpandNames(self):
2465 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
2466 fe7b0351 Michael Hanselmann
2467 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2468 fe7b0351 Michael Hanselmann
    """Build hooks env.
2469 fe7b0351 Michael Hanselmann

2470 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2471 fe7b0351 Michael Hanselmann

2472 fe7b0351 Michael Hanselmann
    """
2473 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2474 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2475 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2476 fe7b0351 Michael Hanselmann
    return env, nl, nl
2477 fe7b0351 Michael Hanselmann
2478 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2479 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2480 fe7b0351 Michael Hanselmann

2481 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2482 fe7b0351 Michael Hanselmann

2483 fe7b0351 Michael Hanselmann
    """
2484 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2485 4e0b4d2d Guido Trotter
    assert instance is not None, \
2486 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2487 4e0b4d2d Guido Trotter
2488 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2489 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2490 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2491 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2492 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2493 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2494 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2495 72737a7f Iustin Pop
                                              instance.name,
2496 72737a7f Iustin Pop
                                              instance.hypervisor)
2497 fe7b0351 Michael Hanselmann
    if remote_info:
2498 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2499 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2500 3ecf6786 Iustin Pop
                                  instance.primary_node))
2501 d0834de3 Michael Hanselmann
2502 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2503 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2504 d0834de3 Michael Hanselmann
      # OS verification
2505 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2506 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2507 d0834de3 Michael Hanselmann
      if pnode is None:
2508 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2509 3ecf6786 Iustin Pop
                                   self.op.pnode)
2510 72737a7f Iustin Pop
      os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
2511 dfa96ded Guido Trotter
      if not os_obj:
2512 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2513 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2514 d0834de3 Michael Hanselmann
2515 fe7b0351 Michael Hanselmann
    self.instance = instance
2516 fe7b0351 Michael Hanselmann
2517 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2518 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2519 fe7b0351 Michael Hanselmann

2520 fe7b0351 Michael Hanselmann
    """
2521 fe7b0351 Michael Hanselmann
    inst = self.instance
2522 fe7b0351 Michael Hanselmann
2523 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2524 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2525 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2526 97abc79f Iustin Pop
      self.cfg.Update(inst)
2527 d0834de3 Michael Hanselmann
2528 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
2529 fe7b0351 Michael Hanselmann
    try:
2530 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2531 bb2ee932 Iustin Pop
      if not self.rpc.call_instance_os_add(inst.primary_node, inst):
2532 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Could not install OS for instance %s"
2533 f4bc1f2c Michael Hanselmann
                                 " on node %s" %
2534 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2535 fe7b0351 Michael Hanselmann
    finally:
2536 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
2537 fe7b0351 Michael Hanselmann
2538 fe7b0351 Michael Hanselmann
2539 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2540 decd5f45 Iustin Pop
  """Rename an instance.
2541 decd5f45 Iustin Pop

2542 decd5f45 Iustin Pop
  """
2543 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2544 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2545 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2546 decd5f45 Iustin Pop
2547 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2548 decd5f45 Iustin Pop
    """Build hooks env.
2549 decd5f45 Iustin Pop

2550 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2551 decd5f45 Iustin Pop

2552 decd5f45 Iustin Pop
    """
2553 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2554 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2555 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2556 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2557 decd5f45 Iustin Pop
    return env, nl, nl
2558 decd5f45 Iustin Pop
2559 decd5f45 Iustin Pop
  def CheckPrereq(self):
2560 decd5f45 Iustin Pop
    """Check prerequisites.
2561 decd5f45 Iustin Pop

2562 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2563 decd5f45 Iustin Pop

2564 decd5f45 Iustin Pop
    """
2565 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2566 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2567 decd5f45 Iustin Pop
    if instance is None:
2568 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2569 decd5f45 Iustin Pop
                                 self.op.instance_name)
2570 decd5f45 Iustin Pop
    if instance.status != "down":
2571 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2572 decd5f45 Iustin Pop
                                 self.op.instance_name)
2573 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2574 72737a7f Iustin Pop
                                              instance.name,
2575 72737a7f Iustin Pop
                                              instance.hypervisor)
2576 decd5f45 Iustin Pop
    if remote_info:
2577 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2578 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2579 decd5f45 Iustin Pop
                                  instance.primary_node))
2580 decd5f45 Iustin Pop
    self.instance = instance
2581 decd5f45 Iustin Pop
2582 decd5f45 Iustin Pop
    # new name verification
2583 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2584 decd5f45 Iustin Pop
2585 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2586 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
2587 7bde3275 Guido Trotter
    if new_name in instance_list:
2588 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2589 c09f363f Manuel Franceschini
                                 new_name)
2590 7bde3275 Guido Trotter
2591 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2592 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
2593 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2594 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2595 decd5f45 Iustin Pop
2596 decd5f45 Iustin Pop
2597 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2598 decd5f45 Iustin Pop
    """Reinstall the instance.
2599 decd5f45 Iustin Pop

2600 decd5f45 Iustin Pop
    """
2601 decd5f45 Iustin Pop
    inst = self.instance
2602 decd5f45 Iustin Pop
    old_name = inst.name
2603 decd5f45 Iustin Pop
2604 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2605 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2606 b23c4333 Manuel Franceschini
2607 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2608 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
2609 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
2610 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
2611 decd5f45 Iustin Pop
2612 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2613 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2614 decd5f45 Iustin Pop
2615 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2616 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2617 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
2618 72737a7f Iustin Pop
                                                     old_file_storage_dir,
2619 72737a7f Iustin Pop
                                                     new_file_storage_dir)
2620 b23c4333 Manuel Franceschini
2621 b23c4333 Manuel Franceschini
      if not result:
2622 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2623 b23c4333 Manuel Franceschini
                                 " directory '%s' to '%s' (but the instance"
2624 b23c4333 Manuel Franceschini
                                 " has been renamed in Ganeti)" % (
2625 b23c4333 Manuel Franceschini
                                 inst.primary_node, old_file_storage_dir,
2626 b23c4333 Manuel Franceschini
                                 new_file_storage_dir))
2627 b23c4333 Manuel Franceschini
2628 b23c4333 Manuel Franceschini
      if not result[0]:
2629 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2630 b23c4333 Manuel Franceschini
                                 " (but the instance has been renamed in"
2631 b23c4333 Manuel Franceschini
                                 " Ganeti)" % (old_file_storage_dir,
2632 b23c4333 Manuel Franceschini
                                               new_file_storage_dir))
2633 b23c4333 Manuel Franceschini
2634 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
2635 decd5f45 Iustin Pop
    try:
2636 72737a7f Iustin Pop
      if not self.rpc.call_instance_run_rename(inst.primary_node, inst,
2637 d15a9ad3 Guido Trotter
                                               old_name):
2638 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
2639 6291574d Alexander Schreiber
               " (but the instance has been renamed in Ganeti)" %
2640 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2641 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
2642 decd5f45 Iustin Pop
    finally:
2643 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
2644 decd5f45 Iustin Pop
2645 decd5f45 Iustin Pop
2646 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2647 a8083063 Iustin Pop
  """Remove an instance.
2648 a8083063 Iustin Pop

2649 a8083063 Iustin Pop
  """
2650 a8083063 Iustin Pop
  HPATH = "instance-remove"
2651 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2652 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
2653 cf472233 Guido Trotter
  REQ_BGL = False
2654 cf472233 Guido Trotter
2655 cf472233 Guido Trotter
  def ExpandNames(self):
2656 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
2657 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2658 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2659 cf472233 Guido Trotter
2660 cf472233 Guido Trotter
  def DeclareLocks(self, level):
2661 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
2662 cf472233 Guido Trotter
      self._LockInstancesNodes()
2663 a8083063 Iustin Pop
2664 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2665 a8083063 Iustin Pop
    """Build hooks env.
2666 a8083063 Iustin Pop

2667 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2668 a8083063 Iustin Pop

2669 a8083063 Iustin Pop
    """
2670 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2671 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
2672 a8083063 Iustin Pop
    return env, nl, nl
2673 a8083063 Iustin Pop
2674 a8083063 Iustin Pop
  def CheckPrereq(self):
2675 a8083063 Iustin Pop
    """Check prerequisites.
2676 a8083063 Iustin Pop

2677 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2678 a8083063 Iustin Pop

2679 a8083063 Iustin Pop
    """
2680 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2681 cf472233 Guido Trotter
    assert self.instance is not None, \
2682 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2683 a8083063 Iustin Pop
2684 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2685 a8083063 Iustin Pop
    """Remove the instance.
2686 a8083063 Iustin Pop

2687 a8083063 Iustin Pop
    """
2688 a8083063 Iustin Pop
    instance = self.instance
2689 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
2690 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
2691 a8083063 Iustin Pop
2692 72737a7f Iustin Pop
    if not self.rpc.call_instance_shutdown(instance.primary_node, instance):
2693 1d67656e Iustin Pop
      if self.op.ignore_failures:
2694 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2695 1d67656e Iustin Pop
      else:
2696 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2697 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2698 a8083063 Iustin Pop
2699 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
2700 a8083063 Iustin Pop
2701 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
2702 1d67656e Iustin Pop
      if self.op.ignore_failures:
2703 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2704 1d67656e Iustin Pop
      else:
2705 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2706 a8083063 Iustin Pop
2707 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
2708 a8083063 Iustin Pop
2709 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2710 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2711 a8083063 Iustin Pop
2712 a8083063 Iustin Pop
2713 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2714 a8083063 Iustin Pop
  """Logical unit for querying instances.
2715 a8083063 Iustin Pop

2716 a8083063 Iustin Pop
  """
2717 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2718 7eb9d8f7 Guido Trotter
  REQ_BGL = False
2719 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
2720 a2d2e1a7 Iustin Pop
                                    "admin_state", "admin_ram",
2721 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
2722 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
2723 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
2724 a2d2e1a7 Iustin Pop
                                    "(disk).(size)/([0-9]+)",
2725 a2d2e1a7 Iustin Pop
                                    "(disk).(sizes)",
2726 a2d2e1a7 Iustin Pop
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
2727 a2d2e1a7 Iustin Pop
                                    "(nic).(macs|ips|bridges)",
2728 a2d2e1a7 Iustin Pop
                                    "(disk|nic).(count)",
2729 a2d2e1a7 Iustin Pop
                                    "serial_no", "hypervisor", "hvparams",] +
2730 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
2731 a2d2e1a7 Iustin Pop
                                   for name in constants.HVS_PARAMETERS] +
2732 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
2733 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
2734 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
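  # the parenthesised entries in _FIELDS_STATIC above are treated as
  # regular expressions by utils.FieldSet: e.g. "(disk).(size)/([0-9]+)"
  # accepts output fields such as "disk.size/0", and the matched groups
  # are used in Exec below to select the right disk or nic attribute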
2735 31bf511f Iustin Pop
2736 a8083063 Iustin Pop
2737 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
2738 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2739 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2740 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2741 a8083063 Iustin Pop
2742 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
2743 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
2744 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2745 7eb9d8f7 Guido Trotter
2746 57a2fb91 Iustin Pop
    if self.op.names:
2747 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
2748 7eb9d8f7 Guido Trotter
    else:
2749 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
2750 7eb9d8f7 Guido Trotter
2751 31bf511f Iustin Pop
    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
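    # live data (and therefore instance/node locking) is only needed when
    # at least one requested output field is not purely static; queries
    # for static fields alone are answered from the configuration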
2752 57a2fb91 Iustin Pop
    if self.do_locking:
2753 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
2754 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
2755 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2756 7eb9d8f7 Guido Trotter
2757 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
2758 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
2759 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
2760 7eb9d8f7 Guido Trotter
2761 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
2762 7eb9d8f7 Guido Trotter
    """Check prerequisites.
2763 7eb9d8f7 Guido Trotter

2764 7eb9d8f7 Guido Trotter
    """
2765 57a2fb91 Iustin Pop
    pass
2766 069dcc86 Iustin Pop
2767 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2768 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2769 a8083063 Iustin Pop

2770 a8083063 Iustin Pop
    """
2771 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
2772 57a2fb91 Iustin Pop
    if self.do_locking:
2773 57a2fb91 Iustin Pop
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2774 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2775 3fa93523 Guido Trotter
      instance_names = self.wanted
2776 3fa93523 Guido Trotter
      missing = set(instance_names).difference(all_info.keys())
2777 3fa93523 Guido Trotter
      if missing:
2778 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2779 3fa93523 Guido Trotter
          "Some instances were removed before retrieving their data: %s"
2780 3fa93523 Guido Trotter
          % missing)
2781 57a2fb91 Iustin Pop
    else:
2782 57a2fb91 Iustin Pop
      instance_names = all_info.keys()
2783 c1f1cbb2 Iustin Pop
2784 c1f1cbb2 Iustin Pop
    instance_names = utils.NiceSort(instance_names)
2785 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
2786 a8083063 Iustin Pop
2787 a8083063 Iustin Pop
    # begin data gathering
2788 a8083063 Iustin Pop
2789 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2790 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
2791 a8083063 Iustin Pop
2792 a8083063 Iustin Pop
    bad_nodes = []
2793 31bf511f Iustin Pop
    if self.do_locking:
2794 a8083063 Iustin Pop
      live_data = {}
2795 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
2796 a8083063 Iustin Pop
      for name in nodes:
2797 a8083063 Iustin Pop
        result = node_data[name]
2798 a8083063 Iustin Pop
        if result:
2799 a8083063 Iustin Pop
          live_data.update(result)
2800 a8083063 Iustin Pop
        elif result == False:
2801 a8083063 Iustin Pop
          bad_nodes.append(name)
2802 a8083063 Iustin Pop
        # else no instance is alive
2803 a8083063 Iustin Pop
    else:
2804 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2805 a8083063 Iustin Pop
2806 a8083063 Iustin Pop
    # end data gathering
2807 a8083063 Iustin Pop
2808 5018a335 Iustin Pop
    HVPREFIX = "hv/"
2809 338e51e8 Iustin Pop
    BEPREFIX = "be/"
2810 a8083063 Iustin Pop
    output = []
2811 a8083063 Iustin Pop
    for instance in instance_list:
2812 a8083063 Iustin Pop
      iout = []
2813 5018a335 Iustin Pop
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
2814 338e51e8 Iustin Pop
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
2815 a8083063 Iustin Pop
      for field in self.op.output_fields:
2816 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
2817 a8083063 Iustin Pop
        if field == "name":
2818 a8083063 Iustin Pop
          val = instance.name
2819 a8083063 Iustin Pop
        elif field == "os":
2820 a8083063 Iustin Pop
          val = instance.os
2821 a8083063 Iustin Pop
        elif field == "pnode":
2822 a8083063 Iustin Pop
          val = instance.primary_node
2823 a8083063 Iustin Pop
        elif field == "snodes":
2824 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2825 a8083063 Iustin Pop
        elif field == "admin_state":
2826 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2827 a8083063 Iustin Pop
        elif field == "oper_state":
2828 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2829 8a23d2d3 Iustin Pop
            val = None
2830 a8083063 Iustin Pop
          else:
2831 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2832 d8052456 Iustin Pop
        elif field == "status":
2833 d8052456 Iustin Pop
          if instance.primary_node in bad_nodes:
2834 d8052456 Iustin Pop
            val = "ERROR_nodedown"
2835 d8052456 Iustin Pop
          else:
2836 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
2837 d8052456 Iustin Pop
            if running:
2838 d8052456 Iustin Pop
              if instance.status != "down":
2839 d8052456 Iustin Pop
                val = "running"
2840 d8052456 Iustin Pop
              else:
2841 d8052456 Iustin Pop
                val = "ERROR_up"
2842 d8052456 Iustin Pop
            else:
2843 d8052456 Iustin Pop
              if instance.status != "down":
2844 d8052456 Iustin Pop
                val = "ERROR_down"
2845 d8052456 Iustin Pop
              else:
2846 d8052456 Iustin Pop
                val = "ADMIN_down"
2847 a8083063 Iustin Pop
        elif field == "oper_ram":
2848 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2849 8a23d2d3 Iustin Pop
            val = None
2850 a8083063 Iustin Pop
          elif instance.name in live_data:
2851 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2852 a8083063 Iustin Pop
          else:
2853 a8083063 Iustin Pop
            val = "-"
2854 a8083063 Iustin Pop
        elif field == "disk_template":
2855 a8083063 Iustin Pop
          val = instance.disk_template
2856 a8083063 Iustin Pop
        elif field == "ip":
2857 a8083063 Iustin Pop
          val = instance.nics[0].ip
2858 a8083063 Iustin Pop
        elif field == "bridge":
2859 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2860 a8083063 Iustin Pop
        elif field == "mac":
2861 a8083063 Iustin Pop
          val = instance.nics[0].mac
2862 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2863 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
2864 ad24e046 Iustin Pop
          try:
2865 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
2866 ad24e046 Iustin Pop
          except errors.OpPrereqError:
2867 8a23d2d3 Iustin Pop
            val = None
2868 130a6a6f Iustin Pop
        elif field == "tags":
2869 130a6a6f Iustin Pop
          val = list(instance.GetTags())
2870 38d7239a Iustin Pop
        elif field == "serial_no":
2871 38d7239a Iustin Pop
          val = instance.serial_no
2872 5018a335 Iustin Pop
        elif field == "network_port":
2873 5018a335 Iustin Pop
          val = instance.network_port
2874 338e51e8 Iustin Pop
        elif field == "hypervisor":
2875 338e51e8 Iustin Pop
          val = instance.hypervisor
2876 338e51e8 Iustin Pop
        elif field == "hvparams":
2877 338e51e8 Iustin Pop
          val = i_hv
2878 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
2879 5018a335 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
2880 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
2881 338e51e8 Iustin Pop
        elif field == "beparams":
2882 338e51e8 Iustin Pop
          val = i_be
2883 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
2884 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
2885 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
2886 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
2887 71c1af58 Iustin Pop
          # matches a variable list
2888 71c1af58 Iustin Pop
          st_groups = st_match.groups()
2889 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
2890 71c1af58 Iustin Pop
            if st_groups[1] == "count":
2891 71c1af58 Iustin Pop
              val = len(instance.disks)
2892 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
2893 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
2894 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
2895 3e0cea06 Iustin Pop
              try:
2896 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
2897 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
2898 71c1af58 Iustin Pop
                val = None
2899 71c1af58 Iustin Pop
            else:
2900 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
2901 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
2902 71c1af58 Iustin Pop
            if st_groups[1] == "count":
2903 71c1af58 Iustin Pop
              val = len(instance.nics)
2904 41a776da Iustin Pop
            elif st_groups[1] == "macs":
2905 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
2906 41a776da Iustin Pop
            elif st_groups[1] == "ips":
2907 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
2908 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
2909 41a776da Iustin Pop
              val = [nic.bridge for nic in instance.nics]
2910 71c1af58 Iustin Pop
            else:
2911 71c1af58 Iustin Pop
              # index-based item
2912 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
2913 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
2914 71c1af58 Iustin Pop
                val = None
2915 71c1af58 Iustin Pop
              else:
2916 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
2917 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
2918 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
2919 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
2920 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
2921 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].bridge
2922 71c1af58 Iustin Pop
                else:
2923 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
2924 71c1af58 Iustin Pop
          else:
2925 71c1af58 Iustin Pop
            assert False, "Unhandled variable parameter"
2926 a8083063 Iustin Pop
        else:
2927 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2928 a8083063 Iustin Pop
        iout.append(val)
2929 a8083063 Iustin Pop
      output.append(iout)
2930 a8083063 Iustin Pop
2931 a8083063 Iustin Pop
    return output
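  # A short illustration of the dynamic fields handled in Exec above (the
  # concrete field list is hypothetical and shown only as an example): besides
  # plain names, "hv/<param>" and "be/<param>" expose single hypervisor or
  # backend parameters, and _FIELDS_STATIC also matches indexed forms such as
  # "disk.count", "disk.size/0", "nic.count" or "nic.mac/1".  A query with
  #
  #   output_fields = ["name", "be/memory", "disk.size/0", "nic.mac/0"]
  #
  # would return one row per instance with those four values, with None for
  # any out-of-range disk/NIC index.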
2932 a8083063 Iustin Pop
2933 a8083063 Iustin Pop
2934 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2935 a8083063 Iustin Pop
  """Failover an instance.
2936 a8083063 Iustin Pop

2937 a8083063 Iustin Pop
  """
2938 a8083063 Iustin Pop
  HPATH = "instance-failover"
2939 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2940 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2941 c9e5c064 Guido Trotter
  REQ_BGL = False
2942 c9e5c064 Guido Trotter
2943 c9e5c064 Guido Trotter
  def ExpandNames(self):
2944 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
2945 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2946 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2947 c9e5c064 Guido Trotter
2948 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
2949 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
2950 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
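  # Note on the locking pattern used above: at ExpandNames time the
  # instance's nodes are not yet known, so the node level is declared with an
  # empty list and marked LOCKS_REPLACE; once the instance lock is held,
  # DeclareLocks narrows the node locks to exactly the instance's primary and
  # secondary nodes via _LockInstancesNodes.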
2951 a8083063 Iustin Pop
2952 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2953 a8083063 Iustin Pop
    """Build hooks env.
2954 a8083063 Iustin Pop

2955 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2956 a8083063 Iustin Pop

2957 a8083063 Iustin Pop
    """
2958 a8083063 Iustin Pop
    env = {
2959 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2960 a8083063 Iustin Pop
      }
2961 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2962 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
2963 a8083063 Iustin Pop
    return env, nl, nl
2964 a8083063 Iustin Pop
2965 a8083063 Iustin Pop
  def CheckPrereq(self):
2966 a8083063 Iustin Pop
    """Check prerequisites.
2967 a8083063 Iustin Pop

2968 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2969 a8083063 Iustin Pop

2970 a8083063 Iustin Pop
    """
2971 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2972 c9e5c064 Guido Trotter
    assert self.instance is not None, \
2973 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2974 a8083063 Iustin Pop
2975 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
2976 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
2977 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2978 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
2979 2a710df1 Michael Hanselmann
2980 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2981 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2982 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2983 abdf0113 Iustin Pop
                                   "a mirrored disk template")
2984 2a710df1 Michael Hanselmann
2985 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2986 d4f16fd9 Iustin Pop
    # check memory requirements on the secondary node
2987 b9bddb6b Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
2988 338e51e8 Iustin Pop
                         instance.name, bep[constants.BE_MEMORY],
2989 e69d05fd Iustin Pop
                         instance.hypervisor)
2990 3a7c308e Guido Trotter
2991 a8083063 Iustin Pop
    # check bridge existence
2992 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2993 72737a7f Iustin Pop
    if not self.rpc.call_bridges_exist(target_node, brlist):
2994 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2995 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2996 50ff9a7a Iustin Pop
                                 (brlist, target_node))
2997 a8083063 Iustin Pop
2998 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2999 a8083063 Iustin Pop
    """Failover an instance.
3000 a8083063 Iustin Pop

3001 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
3002 a8083063 Iustin Pop
    starting it on the secondary.
3003 a8083063 Iustin Pop

3004 a8083063 Iustin Pop
    """
3005 a8083063 Iustin Pop
    instance = self.instance
3006 a8083063 Iustin Pop
3007 a8083063 Iustin Pop
    source_node = instance.primary_node
3008 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
3009 a8083063 Iustin Pop
3010 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
3011 a8083063 Iustin Pop
    for dev in instance.disks:
3012 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
3013 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
3014 a0aaa0d0 Guido Trotter
        if instance.status == "up" and not self.op.ignore_consistency:
3015 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
3016 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
3017 a8083063 Iustin Pop
3018 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
3019 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
3020 9a4f63d1 Iustin Pop
                 instance.name, source_node)
3021 a8083063 Iustin Pop
3022 72737a7f Iustin Pop
    if not self.rpc.call_instance_shutdown(source_node, instance):
3023 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
3024 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
3025 86d9d3bb Iustin Pop
                             " Proceeding"
3026 86d9d3bb Iustin Pop
                             " anyway. Please make sure node %s is down",
3027 86d9d3bb Iustin Pop
                             instance.name, source_node, source_node)
3028 24a40d57 Iustin Pop
      else:
3029 24a40d57 Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
3030 24a40d57 Iustin Pop
                                 (instance.name, source_node))
3031 a8083063 Iustin Pop
3032 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
3033 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
3034 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
3035 a8083063 Iustin Pop
3036 a8083063 Iustin Pop
    instance.primary_node = target_node
3037 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
3038 b6102dab Guido Trotter
    self.cfg.Update(instance)
3039 a8083063 Iustin Pop
3040 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
3041 12a0cfbe Guido Trotter
    if instance.status == "up":
3042 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
3043 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
3044 9a4f63d1 Iustin Pop
                   instance.name, target_node)
3045 12a0cfbe Guido Trotter
3046 b9bddb6b Iustin Pop
      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
3047 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
3048 12a0cfbe Guido Trotter
      if not disks_ok:
3049 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3050 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
3051 a8083063 Iustin Pop
3052 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
3053 72737a7f Iustin Pop
      if not self.rpc.call_instance_start(target_node, instance, None):
3054 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3055 12a0cfbe Guido Trotter
        raise errors.OpExecError("Could not start instance %s on node %s." %
3056 12a0cfbe Guido Trotter
                                 (instance.name, target_node))
3057 a8083063 Iustin Pop
3058 a8083063 Iustin Pop
3059 b9bddb6b Iustin Pop
def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
3060 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
3061 a8083063 Iustin Pop

3062 a8083063 Iustin Pop
  This always creates all devices.
3063 a8083063 Iustin Pop

3064 a8083063 Iustin Pop
  """
3065 a8083063 Iustin Pop
  if device.children:
3066 a8083063 Iustin Pop
    for child in device.children:
3067 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
3068 a8083063 Iustin Pop
        return False
3069 a8083063 Iustin Pop
3070 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3071 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3072 72737a7f Iustin Pop
                                       instance.name, True, info)
3073 a8083063 Iustin Pop
  if not new_id:
3074 a8083063 Iustin Pop
    return False
3075 a8083063 Iustin Pop
  if device.physical_id is None:
3076 a8083063 Iustin Pop
    device.physical_id = new_id
3077 a8083063 Iustin Pop
  return True
3078 a8083063 Iustin Pop
3079 a8083063 Iustin Pop
3080 b9bddb6b Iustin Pop
def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
3081 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
3082 a8083063 Iustin Pop

3083 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
3084 a8083063 Iustin Pop
  all its children.
3085 a8083063 Iustin Pop

3086 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
3087 a8083063 Iustin Pop

3088 a8083063 Iustin Pop
  """
3089 a8083063 Iustin Pop
  if device.CreateOnSecondary():
3090 a8083063 Iustin Pop
    force = True
3091 a8083063 Iustin Pop
  if device.children:
3092 a8083063 Iustin Pop
    for child in device.children:
3093 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(lu, node, instance,
3094 3f78eef2 Iustin Pop
                                        child, force, info):
3095 a8083063 Iustin Pop
        return False
3096 a8083063 Iustin Pop
3097 a8083063 Iustin Pop
  if not force:
3098 a8083063 Iustin Pop
    return True
3099 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3100 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3101 72737a7f Iustin Pop
                                       instance.name, False, info)
3102 a8083063 Iustin Pop
  if not new_id:
3103 a8083063 Iustin Pop
    return False
3104 a8083063 Iustin Pop
  if device.physical_id is None:
3105 a8083063 Iustin Pop
    device.physical_id = new_id
3106 a8083063 Iustin Pop
  return True
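# Explanatory note for the two helpers above: they walk the disk tree
# recursively.  On the primary node every device in the tree is created,
# while on a secondary only devices that answer True to CreateOnSecondary()
# (and everything below them, via the 'force' flag) are materialized -- e.g.
# for a DRBD8 disk the DRBD device plus its two LV children are created on
# both nodes, whereas a plain LV (which has no secondary at all) is created
# on the primary only.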
3107 a8083063 Iustin Pop
3108 a8083063 Iustin Pop
3109 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
3110 923b1523 Iustin Pop
  """Generate a suitable LV name.
3111 923b1523 Iustin Pop

3112 923b1523 Iustin Pop
  This will generate one logical volume name for each extension in exts.
3113 923b1523 Iustin Pop

3114 923b1523 Iustin Pop
  """
3115 923b1523 Iustin Pop
  results = []
3116 923b1523 Iustin Pop
  for val in exts:
3117 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
3118 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
3119 923b1523 Iustin Pop
  return results
3120 923b1523 Iustin Pop
3121 923b1523 Iustin Pop
3122 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
3123 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
3124 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
3125 a1f445d3 Iustin Pop

3126 a1f445d3 Iustin Pop
  """
3127 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
3128 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
3129 b9bddb6b Iustin Pop
  shared_secret = lu.cfg.GenerateDRBDSecret()
3130 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3131 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
3132 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3133 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
3134 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
3135 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
3136 f9518d38 Iustin Pop
                                      p_minor, s_minor,
3137 f9518d38 Iustin Pop
                                      shared_secret),
3138 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
3139 a1f445d3 Iustin Pop
                          iv_name=iv_name)
3140 a1f445d3 Iustin Pop
  return drbd_dev
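# A minimal sketch of what _GenerateDRBD8Branch returns, assuming a single
# 1024 MB disk called "disk/0" (all concrete values below are hypothetical):
#
#   Disk(dev_type=LD_DRBD8, size=1024, iv_name="disk/0",
#        logical_id=(primary, secondary, port, p_minor, s_minor, secret),
#        children=[Disk(dev_type=LD_LV, size=1024,   # data LV
#                       logical_id=(vgname, "<uuid>.disk0_data")),
#                  Disk(dev_type=LD_LV, size=128,    # metadata LV
#                       logical_id=(vgname, "<uuid>.disk0_meta"))])
#
# The TCP port and the DRBD shared secret are allocated from the cluster
# configuration, which is why the helper needs access to lu.cfg.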
3141 a1f445d3 Iustin Pop
3142 7c0d6283 Michael Hanselmann
3143 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
3144 a8083063 Iustin Pop
                          instance_name, primary_node,
3145 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
3146 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
3147 e2a65344 Iustin Pop
                          base_index):
3148 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
3149 a8083063 Iustin Pop

3150 a8083063 Iustin Pop
  """
3151 a8083063 Iustin Pop
  #TODO: compute space requirements
3152 a8083063 Iustin Pop
3153 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
3154 08db7c5c Iustin Pop
  disk_count = len(disk_info)
3155 08db7c5c Iustin Pop
  disks = []
3156 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
3157 08db7c5c Iustin Pop
    pass
3158 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
3159 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
3160 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
3161 923b1523 Iustin Pop
3162 08db7c5c Iustin Pop
    names = _GenerateUniqueNames(lu, [".disk%d" % i
3163 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
3164 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
3165 e2a65344 Iustin Pop
      disk_index = idx + base_index
3166 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
3167 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
3168 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index)
3169 08db7c5c Iustin Pop
      disks.append(disk_dev)
3170 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
3171 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
3172 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
3173 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
3174 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
3175 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
3176 08db7c5c Iustin Pop
3177 08db7c5c Iustin Pop
    names = _GenerateUniqueNames(lu,
3178 08db7c5c Iustin Pop
                                 [".disk%d_%s" % (i, s)
3179 08db7c5c Iustin Pop
                                  for i in range(disk_count)
3180 08db7c5c Iustin Pop
                                  for s in ("data", "meta")
3181 08db7c5c Iustin Pop
                                  ])
3182 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
3183 112050d9 Iustin Pop
      disk_index = idx + base_index
3184 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
3185 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
3186 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
3187 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
3188 08db7c5c Iustin Pop
      disks.append(disk_dev)
3189 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
3190 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
3191 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
3192 0f1a06e3 Manuel Franceschini
3193 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
3194 112050d9 Iustin Pop
      disk_index = idx + base_index
3195 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
3196 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
3197 08db7c5c Iustin Pop
                              logical_id=(file_driver,
3198 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
3199 08db7c5c Iustin Pop
                                                         idx)))
3200 08db7c5c Iustin Pop
      disks.append(disk_dev)
3201 a8083063 Iustin Pop
  else:
3202 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3203 a8083063 Iustin Pop
  return disks
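# Naming summary for _GenerateDiskTemplate (explanatory note, example names
# are hypothetical): every disk gets the iv_name "disk/<N>" where <N> is its
# index plus base_index.  Plain disks become single LVs named
# "<uuid>.disk<N>", drbd8 disks get a data and a meta LV
# ("<uuid>.disk<N>_data" / "<uuid>.disk<N>_meta") plus DRBD minors allocated
# for both nodes, and file-based disks are stored as
# "<file_storage_dir>/disk<N>" using the requested file driver.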
3204 a8083063 Iustin Pop
3205 a8083063 Iustin Pop
3206 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3207 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3208 3ecf6786 Iustin Pop

3209 3ecf6786 Iustin Pop
  """
3210 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3211 a0c3fea1 Michael Hanselmann
3212 a0c3fea1 Michael Hanselmann
3213 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
3214 a8083063 Iustin Pop
  """Create all disks for an instance.
3215 a8083063 Iustin Pop

3216 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
3217 a8083063 Iustin Pop

3218 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3219 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3220 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3221 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
3222 e4376078 Iustin Pop
  @rtype: boolean
3223 e4376078 Iustin Pop
  @return: the success of the creation
3224 a8083063 Iustin Pop

3225 a8083063 Iustin Pop
  """
3226 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
3227 a0c3fea1 Michael Hanselmann
3228 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3229 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3230 72737a7f Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(instance.primary_node,
3231 72737a7f Iustin Pop
                                                 file_storage_dir)
3232 0f1a06e3 Manuel Franceschini
3233 0f1a06e3 Manuel Franceschini
    if not result:
3234 9a4f63d1 Iustin Pop
      logging.error("Could not connect to node '%s'", instance.primary_node)
3235 0f1a06e3 Manuel Franceschini
      return False
3236 0f1a06e3 Manuel Franceschini
3237 0f1a06e3 Manuel Franceschini
    if not result[0]:
3238 9a4f63d1 Iustin Pop
      logging.error("Failed to create directory '%s'", file_storage_dir)
3239 0f1a06e3 Manuel Franceschini
      return False
3240 0f1a06e3 Manuel Franceschini
3241 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
3242 24991749 Iustin Pop
  # LUSetInstanceParams
3243 a8083063 Iustin Pop
  for device in instance.disks:
3244 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
3245 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
3246 a8083063 Iustin Pop
    #HARDCODE
3247 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
3248 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(lu, secondary_node, instance,
3249 3f78eef2 Iustin Pop
                                        device, False, info):
3250 9a4f63d1 Iustin Pop
        logging.error("Failed to create volume %s (%s) on secondary node %s!",
3251 9a4f63d1 Iustin Pop
                      device.iv_name, device, secondary_node)
3252 a8083063 Iustin Pop
        return False
3253 a8083063 Iustin Pop
    #HARDCODE
3254 b9bddb6b Iustin Pop
    if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
3255 3f78eef2 Iustin Pop
                                    instance, device, info):
3256 9a4f63d1 Iustin Pop
      logging.error("Failed to create volume %s on primary!", device.iv_name)
3257 a8083063 Iustin Pop
      return False
3258 1c6e3627 Manuel Franceschini
3259 a8083063 Iustin Pop
  return True
3260 a8083063 Iustin Pop
3261 a8083063 Iustin Pop
3262 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
3263 a8083063 Iustin Pop
  """Remove all disks for an instance.
3264 a8083063 Iustin Pop

3265 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
3266 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
3267 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
3268 a8083063 Iustin Pop
  with `_CreateDisks()`).
3269 a8083063 Iustin Pop

3270 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3271 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3272 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3273 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
3274 e4376078 Iustin Pop
  @rtype: boolean
3275 e4376078 Iustin Pop
  @return: the success of the removal
3276 a8083063 Iustin Pop

3277 a8083063 Iustin Pop
  """
3278 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
3279 a8083063 Iustin Pop
3280 a8083063 Iustin Pop
  result = True
3281 a8083063 Iustin Pop
  for device in instance.disks:
3282 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
3283 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
3284 72737a7f Iustin Pop
      if not lu.rpc.call_blockdev_remove(node, disk):
3285 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not remove block device %s on node %s,"
3286 86d9d3bb Iustin Pop
                           " continuing anyway", device.iv_name, node)
3287 a8083063 Iustin Pop
        result = False
3288 0f1a06e3 Manuel Franceschini
3289 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3290 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3291 72737a7f Iustin Pop
    if not lu.rpc.call_file_storage_dir_remove(instance.primary_node,
3292 72737a7f Iustin Pop
                                               file_storage_dir):
3293 9a4f63d1 Iustin Pop
      logging.error("Could not remove directory '%s'", file_storage_dir)
3294 0f1a06e3 Manuel Franceschini
      result = False
3295 0f1a06e3 Manuel Franceschini
3296 a8083063 Iustin Pop
  return result
3297 a8083063 Iustin Pop
3298 a8083063 Iustin Pop
3299 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
3300 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
3301 e2fe6369 Iustin Pop

3302 e2fe6369 Iustin Pop
  """
3303 e2fe6369 Iustin Pop
  # Required free disk space as a function of the disk template and disk sizes
3304 e2fe6369 Iustin Pop
  req_size_dict = {
3305 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
3306 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
3307 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
3308 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
3309 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
3310 e2fe6369 Iustin Pop
  }
3311 e2fe6369 Iustin Pop
3312 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
3313 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
3314 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
3315 e2fe6369 Iustin Pop
3316 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
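# A worked example of the table above (sizes are made up for illustration):
# for two disks of 1024 MB and 2048 MB, DT_PLAIN needs 1024 + 2048 = 3072 MB
# of free space in the volume group, DT_DRBD8 needs
# (1024 + 128) + (2048 + 128) = 3328 MB because of the per-disk DRBD
# metadata, and DT_DISKLESS/DT_FILE consume no volume group space at all.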
3317 e2fe6369 Iustin Pop
3318 e2fe6369 Iustin Pop
3319 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
3320 74409b12 Iustin Pop
  """Hypervisor parameter validation.
3321 74409b12 Iustin Pop

3322 74409b12 Iustin Pop
  This function abstracts the hypervisor parameter validation to be
3323 74409b12 Iustin Pop
  used in both instance create and instance modify.
3324 74409b12 Iustin Pop

3325 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
3326 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
3327 74409b12 Iustin Pop
  @type nodenames: list
3328 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
3329 74409b12 Iustin Pop
  @type hvname: string
3330 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
3331 74409b12 Iustin Pop
  @type hvparams: dict
3332 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
3333 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
3334 74409b12 Iustin Pop

3335 74409b12 Iustin Pop
  """
3336 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
3337 74409b12 Iustin Pop
                                                  hvname,
3338 74409b12 Iustin Pop
                                                  hvparams)
3339 74409b12 Iustin Pop
  for node in nodenames:
3340 74409b12 Iustin Pop
    info = hvinfo.get(node, None)
3341 74409b12 Iustin Pop
    if not info or not isinstance(info, (tuple, list)):
3342 74409b12 Iustin Pop
      raise errors.OpPrereqError("Cannot get current information"
3343 74409b12 Iustin Pop
                                 " from node '%s' (%s)" % (node, info))
3344 74409b12 Iustin Pop
    if not info[0]:
3345 74409b12 Iustin Pop
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
3346 74409b12 Iustin Pop
                                 " %s" % info[1])
3347 74409b12 Iustin Pop
3348 74409b12 Iustin Pop
3349 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
3350 a8083063 Iustin Pop
  """Create an instance.
3351 a8083063 Iustin Pop

3352 a8083063 Iustin Pop
  """
3353 a8083063 Iustin Pop
  HPATH = "instance-add"
3354 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3355 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
3356 08db7c5c Iustin Pop
              "mode", "start",
3357 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
3358 338e51e8 Iustin Pop
              "hvparams", "beparams"]
3359 7baf741d Guido Trotter
  REQ_BGL = False
3360 7baf741d Guido Trotter
3361 7baf741d Guido Trotter
  def _ExpandNode(self, node):
3362 7baf741d Guido Trotter
    """Expands and checks one node name.
3363 7baf741d Guido Trotter

3364 7baf741d Guido Trotter
    """
3365 7baf741d Guido Trotter
    node_full = self.cfg.ExpandNodeName(node)
3366 7baf741d Guido Trotter
    if node_full is None:
3367 7baf741d Guido Trotter
      raise errors.OpPrereqError("Unknown node %s" % node)
3368 7baf741d Guido Trotter
    return node_full
3369 7baf741d Guido Trotter
3370 7baf741d Guido Trotter
  def ExpandNames(self):
3371 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
3372 7baf741d Guido Trotter

3373 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
3374 7baf741d Guido Trotter

3375 7baf741d Guido Trotter
    """
3376 7baf741d Guido Trotter
    self.needed_locks = {}
3377 7baf741d Guido Trotter
3378 7baf741d Guido Trotter
    # set optional parameters to none if they don't exist
3379 6785674e Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
3380 7baf741d Guido Trotter
      if not hasattr(self.op, attr):
3381 7baf741d Guido Trotter
        setattr(self.op, attr, None)
3382 7baf741d Guido Trotter
3383 4b2f38dd Iustin Pop
    # cheap checks, mostly valid constants given
3384 4b2f38dd Iustin Pop
3385 7baf741d Guido Trotter
    # verify creation mode
3386 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
3387 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
3388 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3389 7baf741d Guido Trotter
                                 self.op.mode)
3390 4b2f38dd Iustin Pop
3391 7baf741d Guido Trotter
    # disk template and mirror node verification
3392 7baf741d Guido Trotter
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3393 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid disk template name")
3394 7baf741d Guido Trotter
3395 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
3396 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
3397 4b2f38dd Iustin Pop
3398 8705eb96 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
3399 8705eb96 Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
3400 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
3401 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
3402 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
3403 4b2f38dd Iustin Pop
                                  ",".join(enabled_hvs)))
3404 4b2f38dd Iustin Pop
3405 6785674e Iustin Pop
    # check hypervisor parameter syntax (locally)
3406 6785674e Iustin Pop
3407 8705eb96 Iustin Pop
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
3408 8705eb96 Iustin Pop
                                  self.op.hvparams)
3409 6785674e Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
3410 8705eb96 Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
3411 6785674e Iustin Pop
3412 338e51e8 Iustin Pop
    # fill and remember the beparams dict
3413 338e51e8 Iustin Pop
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
3414 338e51e8 Iustin Pop
                                    self.op.beparams)
3415 338e51e8 Iustin Pop
3416 7baf741d Guido Trotter
    #### instance parameters check
3417 7baf741d Guido Trotter
3418 7baf741d Guido Trotter
    # instance name verification
3419 7baf741d Guido Trotter
    hostname1 = utils.HostInfo(self.op.instance_name)
3420 7baf741d Guido Trotter
    self.op.instance_name = instance_name = hostname1.name
3421 7baf741d Guido Trotter
3422 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
3423 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
3424 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
3425 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3426 7baf741d Guido Trotter
                                 instance_name)
3427 7baf741d Guido Trotter
3428 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
3429 7baf741d Guido Trotter
3430 08db7c5c Iustin Pop
    # NIC buildup
3431 08db7c5c Iustin Pop
    self.nics = []
3432 08db7c5c Iustin Pop
    for nic in self.op.nics:
3433 08db7c5c Iustin Pop
      # ip validity checks
3434 08db7c5c Iustin Pop
      ip = nic.get("ip", None)
3435 08db7c5c Iustin Pop
      if ip is None or ip.lower() == "none":
3436 08db7c5c Iustin Pop
        nic_ip = None
3437 08db7c5c Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
3438 08db7c5c Iustin Pop
        nic_ip = hostname1.ip
3439 08db7c5c Iustin Pop
      else:
3440 08db7c5c Iustin Pop
        if not utils.IsValidIP(ip):
3441 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
3442 08db7c5c Iustin Pop
                                     " like a valid IP" % ip)
3443 08db7c5c Iustin Pop
        nic_ip = ip
3444 08db7c5c Iustin Pop
3445 08db7c5c Iustin Pop
      # MAC address verification
3446 08db7c5c Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
3447 08db7c5c Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
3448 08db7c5c Iustin Pop
        if not utils.IsValidMac(mac.lower()):
3449 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
3450 08db7c5c Iustin Pop
                                     mac)
3451 08db7c5c Iustin Pop
      # bridge verification
3452 08db7c5c Iustin Pop
      bridge = nic.get("bridge", self.cfg.GetDefBridge())
3453 08db7c5c Iustin Pop
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))
3454 08db7c5c Iustin Pop
3455 08db7c5c Iustin Pop
    # disk checks/pre-build
3456 08db7c5c Iustin Pop
    self.disks = []
3457 08db7c5c Iustin Pop
    for disk in self.op.disks:
3458 08db7c5c Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
3459 08db7c5c Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
3460 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
3461 08db7c5c Iustin Pop
                                   mode)
3462 08db7c5c Iustin Pop
      size = disk.get("size", None)
3463 08db7c5c Iustin Pop
      if size is None:
3464 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Missing disk size")
3465 08db7c5c Iustin Pop
      try:
3466 08db7c5c Iustin Pop
        size = int(size)
3467 08db7c5c Iustin Pop
      except ValueError:
3468 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
3469 08db7c5c Iustin Pop
      self.disks.append({"size": size, "mode": mode})
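    # Summary of the two loops above (the concrete values are hypothetical,
    # for illustration only): the opcode passes NICs and disks as lists of
    # dicts, e.g.
    #
    #   nics  = [{"mac": constants.VALUE_AUTO, "ip": None,
    #             "bridge": "xen-br0"}]
    #   disks = [{"size": 10240, "mode": constants.DISK_RDWR}]
    #
    # and ExpandNames normalizes them into self.nics (objects.NIC instances)
    # and self.disks (validated {"size", "mode"} dicts) for CheckPrereq/Exec.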
3470 08db7c5c Iustin Pop
3471 7baf741d Guido Trotter
    # used in CheckPrereq for ip ping check
3472 7baf741d Guido Trotter
    self.check_ip = hostname1.ip
3473 7baf741d Guido Trotter
3474 7baf741d Guido Trotter
    # file storage checks
3475 7baf741d Guido Trotter
    if (self.op.file_driver and
3476 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
3477 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3478 7baf741d Guido Trotter
                                 self.op.file_driver)
3479 7baf741d Guido Trotter
3480 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3481 7baf741d Guido Trotter
      raise errors.OpPrereqError("File storage directory path not absolute")
3482 7baf741d Guido Trotter
3483 7baf741d Guido Trotter
    ### Node/iallocator related checks
3484 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3485 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3486 7baf741d Guido Trotter
                                 " node must be given")
3487 7baf741d Guido Trotter
3488 7baf741d Guido Trotter
    if self.op.iallocator:
3489 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3490 7baf741d Guido Trotter
    else:
3491 7baf741d Guido Trotter
      self.op.pnode = self._ExpandNode(self.op.pnode)
3492 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
3493 7baf741d Guido Trotter
      if self.op.snode is not None:
3494 7baf741d Guido Trotter
        self.op.snode = self._ExpandNode(self.op.snode)
3495 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
3496 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
3497 7baf741d Guido Trotter
3498 7baf741d Guido Trotter
    # in case of import lock the source node too
3499 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
3500 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
3501 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
3502 7baf741d Guido Trotter
3503 7baf741d Guido Trotter
      if src_node is None or src_path is None:
3504 7baf741d Guido Trotter
        raise errors.OpPrereqError("Importing an instance requires source"
3505 7baf741d Guido Trotter
                                   " node and path options")
3506 7baf741d Guido Trotter
3507 7baf741d Guido Trotter
      if not os.path.isabs(src_path):
3508 7baf741d Guido Trotter
        raise errors.OpPrereqError("The source path must be absolute")
3509 7baf741d Guido Trotter
3510 7baf741d Guido Trotter
      self.op.src_node = src_node = self._ExpandNode(src_node)
3511 7baf741d Guido Trotter
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
3512 7baf741d Guido Trotter
        self.needed_locks[locking.LEVEL_NODE].append(src_node)
3513 7baf741d Guido Trotter
3514 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
3515 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
3516 7baf741d Guido Trotter
        raise errors.OpPrereqError("No guest OS specified")
3517 a8083063 Iustin Pop
3518 538475ca Iustin Pop
  def _RunAllocator(self):
3519 538475ca Iustin Pop
    """Run the allocator based on input opcode.
3520 538475ca Iustin Pop

3521 538475ca Iustin Pop
    """
3522 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
3523 72737a7f Iustin Pop
    ial = IAllocator(self,
3524 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
3525 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
3526 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
3527 d1c2dd75 Iustin Pop
                     tags=[],
3528 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
3529 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
3530 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
3531 08db7c5c Iustin Pop
                     disks=self.disks,
3532 d1c2dd75 Iustin Pop
                     nics=nics,
3533 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
3534 29859cb7 Iustin Pop
                     )
3535 d1c2dd75 Iustin Pop
3536 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
3537 d1c2dd75 Iustin Pop
3538 d1c2dd75 Iustin Pop
    if not ial.success:
3539 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3540 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3541 d1c2dd75 Iustin Pop
                                                           ial.info))
3542 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3543 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3544 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
3545 97abc79f Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
3546 1ce4bbe3 René Nussbaumer
                                  ial.required_nodes))
3547 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
3548 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
3549 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
3550 86d9d3bb Iustin Pop
                 ", ".join(ial.nodes))
3551 27579978 Iustin Pop
    if ial.required_nodes == 2:
3552 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
3553 538475ca Iustin Pop
3554 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3555 a8083063 Iustin Pop
    """Build hooks env.
3556 a8083063 Iustin Pop

3557 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3558 a8083063 Iustin Pop

3559 a8083063 Iustin Pop
    """
3560 a8083063 Iustin Pop
    env = {
3561 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3562 08db7c5c Iustin Pop
      "INSTANCE_DISK_SIZE": ",".join(str(d["size"]) for d in self.disks),
3563 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
3564 a8083063 Iustin Pop
      }
3565 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3566 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
3567 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
3568 09acf207 Guido Trotter
      env["INSTANCE_SRC_IMAGES"] = self.src_images
3569 396e1b78 Michael Hanselmann
3570 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3571 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
3572 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
3573 396e1b78 Michael Hanselmann
      status=self.instance_status,
3574 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
3575 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
3576 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
3577 08db7c5c Iustin Pop
      nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
3578 396e1b78 Michael Hanselmann
    ))
3579 a8083063 Iustin Pop
3580 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
3581 a8083063 Iustin Pop
          self.secondaries)
3582 a8083063 Iustin Pop
    return env, nl, nl
3583 a8083063 Iustin Pop
3584 a8083063 Iustin Pop
3585 a8083063 Iustin Pop
  def CheckPrereq(self):
3586 a8083063 Iustin Pop
    """Check prerequisites.
3587 a8083063 Iustin Pop

3588 a8083063 Iustin Pop
    """
3589 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3590 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3591 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3592 eedc99de Manuel Franceschini
                                 " instances")
3593 eedc99de Manuel Franceschini
3594 e69d05fd Iustin Pop
3595 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3596 7baf741d Guido Trotter
      src_node = self.op.src_node
3597 7baf741d Guido Trotter
      src_path = self.op.src_path
3598 a8083063 Iustin Pop
3599 72737a7f Iustin Pop
      export_info = self.rpc.call_export_info(src_node, src_path)
3600 a8083063 Iustin Pop
3601 a8083063 Iustin Pop
      if not export_info:
3602 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3603 a8083063 Iustin Pop
3604 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3605 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3606 a8083063 Iustin Pop
3607 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3608 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3609 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3610 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3611 a8083063 Iustin Pop
3612 09acf207 Guido Trotter
      # Check that the new instance doesn't have less disks than the export
3613 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
3614 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
3615 09acf207 Guido Trotter
      if instance_disks < export_disks:
3616 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
3617 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
3618 726d7d68 Iustin Pop
                                   (instance_disks, export_disks))
3619 a8083063 Iustin Pop
3620 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3621 09acf207 Guido Trotter
      disk_images = []
3622 09acf207 Guido Trotter
      for idx in range(export_disks):
3623 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
3624 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
3625 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
3626 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
3627 09acf207 Guido Trotter
          image = os.path.join(src_path, export_name)
3628 09acf207 Guido Trotter
          disk_images.append(image)
3629 09acf207 Guido Trotter
        else:
3630 09acf207 Guido Trotter
          disk_images.append(False)
3631 09acf207 Guido Trotter
3632 09acf207 Guido Trotter
      self.src_images = disk_images
3633 901a65c1 Iustin Pop
3634 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
3635 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
3636 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
3637 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
3638 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
3639 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
3640 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
3641 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
3642 bc89efc3 Guido Trotter
3643 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
3644 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3645 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3646 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3647 901a65c1 Iustin Pop
3648 901a65c1 Iustin Pop
    if self.op.ip_check:
3649 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
3650 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3651 7b3a8fb5 Iustin Pop
                                   (self.check_ip, self.op.instance_name))
3652 901a65c1 Iustin Pop
3653 538475ca Iustin Pop
    #### allocator run
3654 538475ca Iustin Pop
3655 538475ca Iustin Pop
    if self.op.iallocator is not None:
3656 538475ca Iustin Pop
      self._RunAllocator()
3657 0f1a06e3 Manuel Franceschini
3658 901a65c1 Iustin Pop
    #### node related checks
3659 901a65c1 Iustin Pop
3660 901a65c1 Iustin Pop
    # check primary node
3661 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
3662 7baf741d Guido Trotter
    assert self.pnode is not None, \
3663 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
3664 901a65c1 Iustin Pop
    self.secondaries = []
3665 901a65c1 Iustin Pop
3666 901a65c1 Iustin Pop
    # mirror node verification
3667 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3668 7baf741d Guido Trotter
      if self.op.snode is None:
3669 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3670 3ecf6786 Iustin Pop
                                   " a mirror node")
3671 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
3672 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3673 3ecf6786 Iustin Pop
                                   " the primary node.")
3674 7baf741d Guido Trotter
      self.secondaries.append(self.op.snode)
3675 a8083063 Iustin Pop
3676 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
3677 6785674e Iustin Pop
3678 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3679 08db7c5c Iustin Pop
                                self.disks)
3680 ed1ebc60 Guido Trotter
3681 8d75db10 Iustin Pop
    # Check lv size requirements
3682 8d75db10 Iustin Pop
    if req_size is not None:
3683 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
3684 72737a7f Iustin Pop
                                         self.op.hypervisor)
3685 8d75db10 Iustin Pop
      for node in nodenames:
3686 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3687 8d75db10 Iustin Pop
        if not info:
3688 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3689 3e91897b Iustin Pop
                                     " from node '%s'" % node)
3690 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3691 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3692 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3693 8d75db10 Iustin Pop
                                     " node %s" % node)
3694 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
3695 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3696 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3697 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3698 ed1ebc60 Guido Trotter
3699 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
3700 6785674e Iustin Pop
3701 a8083063 Iustin Pop
    # os verification
3702 72737a7f Iustin Pop
    os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
3703 dfa96ded Guido Trotter
    if not os_obj:
3704 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3705 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3706 a8083063 Iustin Pop
3707 901a65c1 Iustin Pop
    # bridge check on primary node
3708 08db7c5c Iustin Pop
    bridges = [n.bridge for n in self.nics]
3709 08db7c5c Iustin Pop
    if not self.rpc.call_bridges_exist(self.pnode.name, bridges):
3710 08db7c5c Iustin Pop
      raise errors.OpPrereqError("one of the target bridges '%s' does not"
3711 08db7c5c Iustin Pop
                                 " exist on"
3712 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3713 08db7c5c Iustin Pop
                                 (",".join(bridges), pnode.name))
3714 a8083063 Iustin Pop
3715 49ce1563 Iustin Pop
    # memory check on primary node
3716 49ce1563 Iustin Pop
    if self.op.start:
3717 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
3718 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3719 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
3720 338e51e8 Iustin Pop
                           self.op.hypervisor)
3721 49ce1563 Iustin Pop
3722 a8083063 Iustin Pop
    if self.op.start:
3723 a8083063 Iustin Pop
      self.instance_status = 'up'
3724 a8083063 Iustin Pop
    else:
3725 a8083063 Iustin Pop
      self.instance_status = 'down'
3726 a8083063 Iustin Pop
3727 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3728 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
3729 a8083063 Iustin Pop

3730 a8083063 Iustin Pop
    """
3731 a8083063 Iustin Pop
    instance = self.op.instance_name
3732 a8083063 Iustin Pop
    pnode_name = self.pnode.name
3733 a8083063 Iustin Pop
3734 08db7c5c Iustin Pop
    for nic in self.nics:
3735 08db7c5c Iustin Pop
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
3736 08db7c5c Iustin Pop
        nic.mac = self.cfg.GenerateMAC()
3737 a8083063 Iustin Pop
3738 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
3739 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
3740 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
3741 2a6469d5 Alexander Schreiber
    else:
3742 2a6469d5 Alexander Schreiber
      network_port = None
3743 58acb49d Alexander Schreiber
3744 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
3745 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3746 31a853d2 Iustin Pop
3747 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
3748 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
3749 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
3750 2c313123 Manuel Franceschini
    else:
3751 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
3752 2c313123 Manuel Franceschini
3753 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
3754 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
3755 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
3756 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
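    # illustrative example (paths are hypothetical): with a cluster file
    # storage dir of /srv/ganeti/file-storage, op.file_storage_dir "mydir"
    # and instance "inst1.example.com", this becomes
    #   /srv/ganeti/file-storage/mydir/inst1.example.com
    # (an empty op.file_storage_dir simply collapses out of the join)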
3757 0f1a06e3 Manuel Franceschini
3758 0f1a06e3 Manuel Franceschini
3759 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
3760 a8083063 Iustin Pop
                                  self.op.disk_template,
3761 a8083063 Iustin Pop
                                  instance, pnode_name,
3762 08db7c5c Iustin Pop
                                  self.secondaries,
3763 08db7c5c Iustin Pop
                                  self.disks,
3764 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
3765 e2a65344 Iustin Pop
                                  self.op.file_driver,
3766 e2a65344 Iustin Pop
                                  0)
3767 a8083063 Iustin Pop
3768 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3769 a8083063 Iustin Pop
                            primary_node=pnode_name,
3770 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
3771 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
3772 a8083063 Iustin Pop
                            status=self.instance_status,
3773 58acb49d Alexander Schreiber
                            network_port=network_port,
3774 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
3775 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
3776 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
3777 a8083063 Iustin Pop
                            )
3778 a8083063 Iustin Pop
3779 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
3780 b9bddb6b Iustin Pop
    if not _CreateDisks(self, iobj):
3781 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
3782 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance)
3783 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
3784 a8083063 Iustin Pop
3785 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
3786 a8083063 Iustin Pop
3787 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3788 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
3789 7baf741d Guido Trotter
    # added the instance to the config
3790 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
3791 a1578d63 Iustin Pop
    # Remove the temp. assignments for the instance's drbds
3792 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance)
3793 e36e96b4 Guido Trotter
    # Unlock all the nodes
3794 e36e96b4 Guido Trotter
    self.context.glm.release(locking.LEVEL_NODE)
3795 e36e96b4 Guido Trotter
    del self.acquired_locks[locking.LEVEL_NODE]
3796 a8083063 Iustin Pop
3797 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3798 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
3799 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3800 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3801 a8083063 Iustin Pop
      time.sleep(15)
3802 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3803 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
3804 a8083063 Iustin Pop
    else:
3805 a8083063 Iustin Pop
      disk_abort = False
3806 a8083063 Iustin Pop
3807 a8083063 Iustin Pop
    if disk_abort:
3808 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
3809 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3810 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
3811 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
3812 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3813 3ecf6786 Iustin Pop
                               " this instance")
3814 a8083063 Iustin Pop
3815 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3816 a8083063 Iustin Pop
                (instance, pnode_name))
3817 a8083063 Iustin Pop
3818 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3819 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3820 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3821 d15a9ad3 Guido Trotter
        if not self.rpc.call_instance_os_add(pnode_name, iobj):
3822 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3823 3ecf6786 Iustin Pop
                                   " on node %s" %
3824 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3825 a8083063 Iustin Pop
3826 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3827 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3828 a8083063 Iustin Pop
        src_node = self.op.src_node
3829 09acf207 Guido Trotter
        src_images = self.src_images
3830 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
3831 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
3832 09acf207 Guido Trotter
                                                         src_node, src_images,
3833 6c0af70e Guido Trotter
                                                         cluster_name)
3834 09acf207 Guido Trotter
        for idx, result in enumerate(import_result):
3835 09acf207 Guido Trotter
          if not result:
3836 726d7d68 Iustin Pop
            self.LogWarning("Could not import the image %s for instance"
3837 726d7d68 Iustin Pop
                            " %s, disk %d, on node %s" %
3838 726d7d68 Iustin Pop
                            (src_images[idx], instance, idx, pnode_name))
3839 a8083063 Iustin Pop
      else:
3840 a8083063 Iustin Pop
        # also checked in the prereq part
3841 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3842 3ecf6786 Iustin Pop
                                     % self.op.mode)
3843 a8083063 Iustin Pop
3844 a8083063 Iustin Pop
    if self.op.start:
3845 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
3846 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3847 72737a7f Iustin Pop
      if not self.rpc.call_instance_start(pnode_name, iobj, None):
3848 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3849 a8083063 Iustin Pop
3850 a8083063 Iustin Pop
3851 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3852 a8083063 Iustin Pop
  """Connect to an instance's console.
3853 a8083063 Iustin Pop

3854 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3855 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3856 a8083063 Iustin Pop
  console.
3857 a8083063 Iustin Pop

3858 a8083063 Iustin Pop
  """
3859 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3860 8659b73e Guido Trotter
  REQ_BGL = False
3861 8659b73e Guido Trotter
3862 8659b73e Guido Trotter
  def ExpandNames(self):
3863 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
3864 a8083063 Iustin Pop
3865 a8083063 Iustin Pop
  def CheckPrereq(self):
3866 a8083063 Iustin Pop
    """Check prerequisites.
3867 a8083063 Iustin Pop

3868 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3869 a8083063 Iustin Pop

3870 a8083063 Iustin Pop
    """
3871 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3872 8659b73e Guido Trotter
    assert self.instance is not None, \
3873 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3874 a8083063 Iustin Pop
3875 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3876 a8083063 Iustin Pop
    """Connect to the console of an instance
3877 a8083063 Iustin Pop

3878 a8083063 Iustin Pop
    """
3879 a8083063 Iustin Pop
    instance = self.instance
3880 a8083063 Iustin Pop
    node = instance.primary_node
3881 a8083063 Iustin Pop
3882 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
3883 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
3884 a8083063 Iustin Pop
    if node_insts is False:
3885 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3886 a8083063 Iustin Pop
3887 a8083063 Iustin Pop
    if instance.name not in node_insts:
3888 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3889 a8083063 Iustin Pop
3890 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
3891 a8083063 Iustin Pop
3892 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
3893 30989e69 Alexander Schreiber
    console_cmd = hyper.GetShellCommandForConsole(instance)
3894 b047857b Michael Hanselmann
3895 82122173 Iustin Pop
    # build ssh cmdline
3896 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
3897 a8083063 Iustin Pop
3898 a8083063 Iustin Pop
3899 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3900 a8083063 Iustin Pop
  """Replace the disks of an instance.
3901 a8083063 Iustin Pop

3902 a8083063 Iustin Pop
  """
3903 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3904 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3905 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3906 efd990e4 Guido Trotter
  REQ_BGL = False
3907 efd990e4 Guido Trotter
3908 efd990e4 Guido Trotter
  def ExpandNames(self):
3909 efd990e4 Guido Trotter
    self._ExpandAndLockInstance()
3910 efd990e4 Guido Trotter
3911 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
3912 efd990e4 Guido Trotter
      self.op.remote_node = None
3913 efd990e4 Guido Trotter
3914 efd990e4 Guido Trotter
    ia_name = getattr(self.op, "iallocator", None)
3915 efd990e4 Guido Trotter
    if ia_name is not None:
3916 efd990e4 Guido Trotter
      if self.op.remote_node is not None:
3917 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Give either the iallocator or the new"
3918 efd990e4 Guido Trotter
                                   " secondary, not both")
3919 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3920 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
3921 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
3922 efd990e4 Guido Trotter
      if remote_node is None:
3923 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
3924 efd990e4 Guido Trotter
                                   self.op.remote_node)
3925 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
3926 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
3927 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3928 efd990e4 Guido Trotter
    else:
3929 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
3930 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3931 efd990e4 Guido Trotter
3932 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
3933 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
3934 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
3935 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
3936 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
3937 efd990e4 Guido Trotter
      self._LockInstancesNodes()
3938 a8083063 Iustin Pop
3939 b6e82a65 Iustin Pop
  def _RunAllocator(self):
3940 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
3941 b6e82a65 Iustin Pop

3942 b6e82a65 Iustin Pop
    """
3943 72737a7f Iustin Pop
    ial = IAllocator(self,
3944 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
3945 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
3946 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
3947 b6e82a65 Iustin Pop
3948 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
3949 b6e82a65 Iustin Pop
3950 b6e82a65 Iustin Pop
    if not ial.success:
3951 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3952 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3953 b6e82a65 Iustin Pop
                                                           ial.info))
3954 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3955 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3956 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
3957 b6e82a65 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
3958 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
3959 86d9d3bb Iustin Pop
    self.LogInfo("Selected new secondary for the instance: %s",
3960 86d9d3bb Iustin Pop
                 self.op.remote_node)
3961 b6e82a65 Iustin Pop
3962 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3963 a8083063 Iustin Pop
    """Build hooks env.
3964 a8083063 Iustin Pop

3965 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3966 a8083063 Iustin Pop

3967 a8083063 Iustin Pop
    """
3968 a8083063 Iustin Pop
    env = {
3969 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
3970 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3971 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3972 a8083063 Iustin Pop
      }
3973 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3974 0834c866 Iustin Pop
    nl = [
3975 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
3976 0834c866 Iustin Pop
      self.instance.primary_node,
3977 0834c866 Iustin Pop
      ]
3978 0834c866 Iustin Pop
    if self.op.remote_node is not None:
3979 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
3980 a8083063 Iustin Pop
    return env, nl, nl
3981 a8083063 Iustin Pop
3982 a8083063 Iustin Pop
  def CheckPrereq(self):
3983 a8083063 Iustin Pop
    """Check prerequisites.
3984 a8083063 Iustin Pop

3985 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3986 a8083063 Iustin Pop

3987 a8083063 Iustin Pop
    """
3988 efd990e4 Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3989 efd990e4 Guido Trotter
    assert instance is not None, \
3990 efd990e4 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3991 a8083063 Iustin Pop
    self.instance = instance
3992 a8083063 Iustin Pop
3993 a9e0c397 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3994 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3995 a9e0c397 Iustin Pop
                                 " network mirrored.")
3996 a8083063 Iustin Pop
3997 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3998 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3999 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
4000 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
4001 a8083063 Iustin Pop
4002 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
4003 a9e0c397 Iustin Pop
4004 b6e82a65 Iustin Pop
    ia_name = getattr(self.op, "iallocator", None)
4005 b6e82a65 Iustin Pop
    if ia_name is not None:
4006 de8c7666 Guido Trotter
      self._RunAllocator()
4007 b6e82a65 Iustin Pop
4008 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
4009 a9e0c397 Iustin Pop
    if remote_node is not None:
4010 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
4011 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
4012 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
4013 a9e0c397 Iustin Pop
    else:
4014 a9e0c397 Iustin Pop
      self.remote_node_info = None
4015 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
4016 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
4017 3ecf6786 Iustin Pop
                                 " the instance.")
4018 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
4019 0834c866 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_SEC:
4020 0834c866 Iustin Pop
        # this is for DRBD8, where we can't execute the same mode of
4021 0834c866 Iustin Pop
        # replacement as for drbd7 (no different port allocated)
4022 0834c866 Iustin Pop
        raise errors.OpPrereqError("Same secondary given, cannot execute"
4023 0834c866 Iustin Pop
                                   " replacement")
4024 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
4025 7df43a76 Iustin Pop
      if (self.op.mode == constants.REPLACE_DISK_ALL and
4026 7df43a76 Iustin Pop
          remote_node is not None):
4027 7df43a76 Iustin Pop
        # switch to replace secondary mode
4028 7df43a76 Iustin Pop
        self.op.mode = constants.REPLACE_DISK_SEC
4029 7df43a76 Iustin Pop
4030 a9e0c397 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_ALL:
4031 12c3449a Michael Hanselmann
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
4032 a9e0c397 Iustin Pop
                                   " secondary disk replacement, not"
4033 a9e0c397 Iustin Pop
                                   " both at once")
4034 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_PRI:
4035 a9e0c397 Iustin Pop
        if remote_node is not None:
4036 12c3449a Michael Hanselmann
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
4037 a9e0c397 Iustin Pop
                                     " the secondary while doing a primary"
4038 a9e0c397 Iustin Pop
                                     " node disk replacement")
4039 a9e0c397 Iustin Pop
        self.tgt_node = instance.primary_node
4040 cff90b79 Iustin Pop
        self.oth_node = instance.secondary_nodes[0]
4041 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_SEC:
4042 a9e0c397 Iustin Pop
        self.new_node = remote_node # this can be None, in which case
4043 a9e0c397 Iustin Pop
                                    # we don't change the secondary
4044 a9e0c397 Iustin Pop
        self.tgt_node = instance.secondary_nodes[0]
4045 cff90b79 Iustin Pop
        self.oth_node = instance.primary_node
4046 a9e0c397 Iustin Pop
      else:
4047 a9e0c397 Iustin Pop
        raise errors.ProgrammerError("Unhandled disk replace mode")
4048 a9e0c397 Iustin Pop
4049 54155f52 Iustin Pop
    if not self.op.disks:
4050 54155f52 Iustin Pop
      self.op.disks = range(len(instance.disks))
4051 54155f52 Iustin Pop
4052 54155f52 Iustin Pop
    for disk_idx in self.op.disks:
4053 3e0cea06 Iustin Pop
      instance.FindDisk(disk_idx)
4054 a8083063 Iustin Pop
4055 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
4056 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
4057 a9e0c397 Iustin Pop

4058 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4059 e4376078 Iustin Pop

4060 e4376078 Iustin Pop
      1. for each disk to be replaced:
4061 e4376078 Iustin Pop

4062 e4376078 Iustin Pop
        1. create new LVs on the target node with unique names
4063 e4376078 Iustin Pop
        1. detach old LVs from the drbd device
4064 e4376078 Iustin Pop
        1. rename old LVs to name_replaced.<time_t>
4065 e4376078 Iustin Pop
        1. rename new LVs to old LVs
4066 e4376078 Iustin Pop
        1. attach the new LVs (with the old names now) to the drbd device
4067 e4376078 Iustin Pop

4068 e4376078 Iustin Pop
      1. wait for sync across all devices
4069 e4376078 Iustin Pop

4070 e4376078 Iustin Pop
      1. for each modified disk:
4071 e4376078 Iustin Pop

4072 e4376078 Iustin Pop
        1. remove old LVs (which have the name name_replaced.<time_t>)
4073 a9e0c397 Iustin Pop

4074 a9e0c397 Iustin Pop
    Failures are not very well handled.
4075 cff90b79 Iustin Pop

4076 a9e0c397 Iustin Pop
    """
4077 cff90b79 Iustin Pop
    steps_total = 6
4078 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4079 a9e0c397 Iustin Pop
    instance = self.instance
4080 a9e0c397 Iustin Pop
    iv_names = {}
4081 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4082 a9e0c397 Iustin Pop
    # start of work
4083 a9e0c397 Iustin Pop
    cfg = self.cfg
4084 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
4085 cff90b79 Iustin Pop
    oth_node = self.oth_node
4086 cff90b79 Iustin Pop
4087 cff90b79 Iustin Pop
    # Step: check device activation
4088 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4089 cff90b79 Iustin Pop
    info("checking volume groups")
4090 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
4091 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([oth_node, tgt_node])
4092 cff90b79 Iustin Pop
    if not results:
4093 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4094 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
4095 cff90b79 Iustin Pop
      res = results.get(node, False)
4096 cff90b79 Iustin Pop
      if not res or my_vg not in res:
4097 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4098 cff90b79 Iustin Pop
                                 (my_vg, node))
4099 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
4100 54155f52 Iustin Pop
      if idx not in self.op.disks:
4101 cff90b79 Iustin Pop
        continue
4102 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
4103 54155f52 Iustin Pop
        info("checking disk/%d on %s" % (idx, node))
4104 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
4105 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_find(node, dev):
4106 54155f52 Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s" %
4107 54155f52 Iustin Pop
                                   (idx, node))
4108 cff90b79 Iustin Pop
4109 cff90b79 Iustin Pop
    # Step: check other node consistency
4110 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4111 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
4112 54155f52 Iustin Pop
      if idx not in self.op.disks:
4113 cff90b79 Iustin Pop
        continue
4114 54155f52 Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, oth_node))
4115 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, oth_node,
4116 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
4117 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
4118 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
4119 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
4120 cff90b79 Iustin Pop
4121 cff90b79 Iustin Pop
    # Step: create new storage
4122 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4123 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
4124 54155f52 Iustin Pop
      if idx not in self.op.disks:
4125 a9e0c397 Iustin Pop
        continue
4126 a9e0c397 Iustin Pop
      size = dev.size
4127 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
4128 54155f52 Iustin Pop
      lv_names = [".disk%d_%s" % (idx, suf)
4129 54155f52 Iustin Pop
                  for suf in ["data", "meta"]]
4130 b9bddb6b Iustin Pop
      names = _GenerateUniqueNames(self, lv_names)
4131 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
4132 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
4133 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
4134 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
4135 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
4136 a9e0c397 Iustin Pop
      old_lvs = dev.children
4137 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
4138 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
4139 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
4140 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4141 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4142 a9e0c397 Iustin Pop
      # are talking about the secondary node
4143 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
4144 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
4145 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4146 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4147 a9e0c397 Iustin Pop
                                   " node '%s'" %
4148 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
4149 a9e0c397 Iustin Pop
4150 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
4151 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
4152 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
4153 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
4154 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
4155 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
4156 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
4157 cff90b79 Iustin Pop
      #dev.children = []
4158 cff90b79 Iustin Pop
      #cfg.Update(instance)
4159 a9e0c397 Iustin Pop
4160 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
4161 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
4162 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
4163 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
4164 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
4165 cff90b79 Iustin Pop
4166 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
4167 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
4168 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
4169 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
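      # e.g. a hypothetical LV with physical_id ("xenvg", "aaa.disk0_data")
      # is mapped by ren_fn to ("xenvg", "aaa.disk0_data_replaced-<suffix>")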
4170 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
4171 cff90b79 Iustin Pop
      rlist = []
4172 cff90b79 Iustin Pop
      for to_ren in old_lvs:
4173 72737a7f Iustin Pop
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
4174 cff90b79 Iustin Pop
        if find_res is not None: # device exists
4175 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
4176 cff90b79 Iustin Pop
4177 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
4178 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4179 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
4180 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
4181 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
4182 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
4183 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4184 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
4185 cff90b79 Iustin Pop
4186 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
4187 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
4188 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
4189 a9e0c397 Iustin Pop
4190 cff90b79 Iustin Pop
      for disk in old_lvs:
4191 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
4192 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
4193 a9e0c397 Iustin Pop
4194 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
4195 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
4196 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
4197 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
4198 72737a7f Iustin Pop
          if not self.rpc.call_blockdev_remove(tgt_node, new_lv):
4199 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
4200 cff90b79 Iustin Pop
                    " logical volumes")
4201 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
4202 a9e0c397 Iustin Pop
4203 a9e0c397 Iustin Pop
      dev.children = new_lvs
4204 a9e0c397 Iustin Pop
      cfg.Update(instance)
4205 a9e0c397 Iustin Pop
4206 cff90b79 Iustin Pop
    # Step: wait for sync
4207 a9e0c397 Iustin Pop
4208 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4209 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4210 a9e0c397 Iustin Pop
    # return value
4211 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4212 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4213 a9e0c397 Iustin Pop
4214 a9e0c397 Iustin Pop
    # so check manually all the devices
4215 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4216 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
4217 72737a7f Iustin Pop
      is_degr = self.rpc.call_blockdev_find(instance.primary_node, dev)[5]
4218 a9e0c397 Iustin Pop
      if is_degr:
4219 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4220 a9e0c397 Iustin Pop
4221 cff90b79 Iustin Pop
    # Step: remove old storage
4222 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4223 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4224 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
4225 a9e0c397 Iustin Pop
      for lv in old_lvs:
4226 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
4227 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_remove(tgt_node, lv):
4228 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
4229 a9e0c397 Iustin Pop
          continue
4230 a9e0c397 Iustin Pop
4231 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
4232 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
4233 a9e0c397 Iustin Pop

4234 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4235 a9e0c397 Iustin Pop
      - for all disks of the instance:
4236 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
4237 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
4238 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
4239 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
4240 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
4241 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
4242 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
4243 a9e0c397 Iustin Pop
          not network enabled
4244 a9e0c397 Iustin Pop
      - wait for sync across all devices
4245 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
4246 a9e0c397 Iustin Pop

4247 a9e0c397 Iustin Pop
    Failures are not very well handled.
4248 0834c866 Iustin Pop

4249 a9e0c397 Iustin Pop
    """
4250 0834c866 Iustin Pop
    steps_total = 6
4251 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4252 a9e0c397 Iustin Pop
    instance = self.instance
4253 a9e0c397 Iustin Pop
    iv_names = {}
4254 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4255 a9e0c397 Iustin Pop
    # start of work
4256 a9e0c397 Iustin Pop
    cfg = self.cfg
4257 a9e0c397 Iustin Pop
    old_node = self.tgt_node
4258 a9e0c397 Iustin Pop
    new_node = self.new_node
4259 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
4260 0834c866 Iustin Pop
4261 0834c866 Iustin Pop
    # Step: check device activation
4262 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4263 0834c866 Iustin Pop
    info("checking volume groups")
4264 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
4265 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
4266 0834c866 Iustin Pop
    if not results:
4267 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4268 0834c866 Iustin Pop
    for node in pri_node, new_node:
4269 0834c866 Iustin Pop
      res = results.get(node, False)
4270 0834c866 Iustin Pop
      if not res or my_vg not in res:
4271 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4272 0834c866 Iustin Pop
                                 (my_vg, node))
4273 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4274 d418ebfb Iustin Pop
      if idx not in self.op.disks:
4275 0834c866 Iustin Pop
        continue
4276 d418ebfb Iustin Pop
      info("checking disk/%d on %s" % (idx, pri_node))
4277 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4278 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4279 d418ebfb Iustin Pop
        raise errors.OpExecError("Can't find disk/%d on node %s" %
4280 d418ebfb Iustin Pop
                                 (idx, pri_node))
4281 0834c866 Iustin Pop
4282 0834c866 Iustin Pop
    # Step: check other node consistency
4283 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4284 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4285 d418ebfb Iustin Pop
      if idx not in self.op.disks:
4286 0834c866 Iustin Pop
        continue
4287 d418ebfb Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, pri_node))
4288 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4289 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4290 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4291 0834c866 Iustin Pop
                                 pri_node)
4292 0834c866 Iustin Pop
4293 0834c866 Iustin Pop
    # Step: create new storage
4294 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4295 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4296 a9e0c397 Iustin Pop
      size = dev.size
4297 d418ebfb Iustin Pop
      info("adding new local storage on %s for disk/%d" %
4298 d418ebfb Iustin Pop
           (new_node, idx))
4299 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4300 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4301 a9e0c397 Iustin Pop
      # are talking about the secondary node
4302 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4303 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
4304 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4305 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4306 a9e0c397 Iustin Pop
                                   " node '%s'" %
4307 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4308 a9e0c397 Iustin Pop
4309 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
4310 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
4311 a1578d63 Iustin Pop
    # error and the success paths
4312 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4313 a1578d63 Iustin Pop
                                   instance.name)
4314 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
4315 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4316 d418ebfb Iustin Pop
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
4317 0834c866 Iustin Pop
      size = dev.size
4318 d418ebfb Iustin Pop
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
4319 a9e0c397 Iustin Pop
      # create new devices on new_node
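      # the drbd8 logical_id is, roughly, the tuple
      #   (node_a, node_b, port, minor_a, minor_b, secret)
      # (descriptive names, not the real field names): port and secret are
      # kept, the old secondary is swapped for new_node with the freshly
      # allocated minor, and the primary's own minor is left untouched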
4320 ffa1c0dc Iustin Pop
      if pri_node == dev.logical_id[0]:
4321 ffa1c0dc Iustin Pop
        new_logical_id = (pri_node, new_node,
4322 f9518d38 Iustin Pop
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4323 f9518d38 Iustin Pop
                          dev.logical_id[5])
4324 ffa1c0dc Iustin Pop
      else:
4325 ffa1c0dc Iustin Pop
        new_logical_id = (new_node, pri_node,
4326 f9518d38 Iustin Pop
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4327 f9518d38 Iustin Pop
                          dev.logical_id[5])
4328 d418ebfb Iustin Pop
      iv_names[idx] = (dev, dev.children, new_logical_id)
4329 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4330 a1578d63 Iustin Pop
                    new_logical_id)
4331 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4332 ffa1c0dc Iustin Pop
                              logical_id=new_logical_id,
4333 a9e0c397 Iustin Pop
                              children=dev.children)
4334 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(self, new_node, instance,
4335 3f78eef2 Iustin Pop
                                        new_drbd, False,
4336 b9bddb6b Iustin Pop
                                        _GetInstanceInfoText(instance)):
4337 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4338 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4339 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4340 a9e0c397 Iustin Pop
4341 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4342 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4343 d418ebfb Iustin Pop
      info("shutting down drbd for disk/%d on old node" % idx)
4344 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4345 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_shutdown(old_node, dev):
4346 d418ebfb Iustin Pop
        warning("Failed to shutdown drbd for disk/%d on old node" % idx,
4347 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4348 a9e0c397 Iustin Pop
4349 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4350 642445d9 Iustin Pop
    done = 0
4351 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4352 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4353 f9518d38 Iustin Pop
      # set the network part of the physical (unique in bdev terms) id
4354 f9518d38 Iustin Pop
      # to None, meaning detach from network
4355 f9518d38 Iustin Pop
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
4356 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4357 642445d9 Iustin Pop
      # standalone state
4358 72737a7f Iustin Pop
      if self.rpc.call_blockdev_find(pri_node, dev):
4359 642445d9 Iustin Pop
        done += 1
4360 642445d9 Iustin Pop
      else:
4361 d418ebfb Iustin Pop
        warning("Failed to detach drbd disk/%d from network, unusual case" %
4362 d418ebfb Iustin Pop
                idx)
4363 642445d9 Iustin Pop
4364 642445d9 Iustin Pop
    if not done:
4365 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4366 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
4367 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4368 642445d9 Iustin Pop
4369 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4370 642445d9 Iustin Pop
    # the instance to point to the new secondary
4371 642445d9 Iustin Pop
    info("updating instance configuration")
4372 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
4373 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
4374 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4375 642445d9 Iustin Pop
    cfg.Update(instance)
4376 a1578d63 Iustin Pop
    # we can now remove the temp minors, as the new values are
4377 a1578d63 Iustin Pop
    # written to the config file (and therefore stable)
4378 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance.name)
4379 a9e0c397 Iustin Pop
4380 642445d9 Iustin Pop
    # and now perform the drbd attach
4381 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4382 642445d9 Iustin Pop
    failures = []
4383 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
4384 d418ebfb Iustin Pop
      info("attaching primary drbd for disk/%d to new secondary node" % idx)
4385 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4386 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4387 642445d9 Iustin Pop
      # is correct
4388 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4389 ffa1c0dc Iustin Pop
      logging.debug("Disk to attach: %s", dev)
4390 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4391 d418ebfb Iustin Pop
        warning("can't attach drbd disk/%d to new secondary!" % idx,
4392 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4393 a9e0c397 Iustin Pop
4394 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4395 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4396 a9e0c397 Iustin Pop
    # return value
4397 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4398 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4399 a9e0c397 Iustin Pop
4400 a9e0c397 Iustin Pop
    # so check manually all the devices
4401 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
4402 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4403 72737a7f Iustin Pop
      is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5]
4404 a9e0c397 Iustin Pop
      if is_degr:
4405 d418ebfb Iustin Pop
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
4406 a9e0c397 Iustin Pop
4407 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4408 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
4409 d418ebfb Iustin Pop
      info("remove logical volumes for disk/%d" % idx)
4410 a9e0c397 Iustin Pop
      for lv in old_lvs:
4411 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4412 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_remove(old_node, lv):
4413 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4414 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4415 a9e0c397 Iustin Pop
4416 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
4417 a9e0c397 Iustin Pop
    """Execute disk replacement.
4418 a9e0c397 Iustin Pop

4419 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
4420 a9e0c397 Iustin Pop

4421 a9e0c397 Iustin Pop
    """
4422 a9e0c397 Iustin Pop
    instance = self.instance
4423 22985314 Guido Trotter
4424 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
4425 22985314 Guido Trotter
    if instance.status == "down":
4426 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, True)
4427 22985314 Guido Trotter
4428 abdf0113 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
4429 a9e0c397 Iustin Pop
      if self.op.remote_node is None:
4430 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
4431 a9e0c397 Iustin Pop
      else:
4432 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
4433 a9e0c397 Iustin Pop
    else:
4434 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
4435 22985314 Guido Trotter
4436 22985314 Guido Trotter
    ret = fn(feedback_fn)
4437 22985314 Guido Trotter
4438 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
4439 22985314 Guido Trotter
    if instance.status == "down":
4440 b9bddb6b Iustin Pop
      _SafeShutdownInstanceDisks(self, instance)
4441 22985314 Guido Trotter
4442 22985314 Guido Trotter
    return ret
4443 a9e0c397 Iustin Pop
4444 a8083063 Iustin Pop
4445 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
4446 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
4447 8729e0d7 Iustin Pop

4448 8729e0d7 Iustin Pop
  """
4449 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
4450 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4451 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
4452 31e63dbf Guido Trotter
  REQ_BGL = False
4453 31e63dbf Guido Trotter
4454 31e63dbf Guido Trotter
  def ExpandNames(self):
4455 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
4456 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4457 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4458 31e63dbf Guido Trotter
4459 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
4460 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
4461 31e63dbf Guido Trotter
      self._LockInstancesNodes()
4462 8729e0d7 Iustin Pop
4463 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
4464 8729e0d7 Iustin Pop
    """Build hooks env.
4465 8729e0d7 Iustin Pop

4466 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
4467 8729e0d7 Iustin Pop

4468 8729e0d7 Iustin Pop
    """
4469 8729e0d7 Iustin Pop
    env = {
4470 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
4471 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
4472 8729e0d7 Iustin Pop
      }
4473 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4474 8729e0d7 Iustin Pop
    nl = [
4475 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
4476 8729e0d7 Iustin Pop
      self.instance.primary_node,
4477 8729e0d7 Iustin Pop
      ]
4478 8729e0d7 Iustin Pop
    return env, nl, nl
4479 8729e0d7 Iustin Pop
4480 8729e0d7 Iustin Pop
  def CheckPrereq(self):
4481 8729e0d7 Iustin Pop
    """Check prerequisites.
4482 8729e0d7 Iustin Pop

4483 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
4484 8729e0d7 Iustin Pop

4485 8729e0d7 Iustin Pop
    """
4486 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4487 31e63dbf Guido Trotter
    assert instance is not None, \
4488 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4489 31e63dbf Guido Trotter
4490 8729e0d7 Iustin Pop
    self.instance = instance
4491 8729e0d7 Iustin Pop
4492 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
4493 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
4494 8729e0d7 Iustin Pop
                                 " growing.")
4495 8729e0d7 Iustin Pop
4496 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
4497 8729e0d7 Iustin Pop
4498 8729e0d7 Iustin Pop
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
4499 72737a7f Iustin Pop
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4500 72737a7f Iustin Pop
                                       instance.hypervisor)
4501 8729e0d7 Iustin Pop
    for node in nodenames:
4502 8729e0d7 Iustin Pop
      info = nodeinfo.get(node, None)
4503 8729e0d7 Iustin Pop
      if not info:
4504 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
4505 8729e0d7 Iustin Pop
                                   " from node '%s'" % node)
4506 8729e0d7 Iustin Pop
      vg_free = info.get('vg_free', None)
4507 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
4508 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
4509 8729e0d7 Iustin Pop
                                   " node %s" % node)
4510 8729e0d7 Iustin Pop
      if self.op.amount > info['vg_free']:
4511 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
4512 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
4513 8729e0d7 Iustin Pop
                                   (node, info['vg_free'], self.op.amount))
4514 8729e0d7 Iustin Pop
4515 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
4516 8729e0d7 Iustin Pop
    """Execute disk grow.
4517 8729e0d7 Iustin Pop

4518 8729e0d7 Iustin Pop
    """
4519 8729e0d7 Iustin Pop
    instance = self.instance
4520 ad24e046 Iustin Pop
    disk = self.disk
4521 8729e0d7 Iustin Pop
    for node in (instance.secondary_nodes + (instance.primary_node,)):
4522 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
4523 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
4524 72737a7f Iustin Pop
      if (not result or not isinstance(result, (list, tuple)) or
4525 72737a7f Iustin Pop
          len(result) != 2):
4526 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s" % node)
4527 8729e0d7 Iustin Pop
      elif not result[0]:
4528 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s: %s" %
4529 8729e0d7 Iustin Pop
                                 (node, result[1]))
4530 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
4531 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
4532 6605411d Iustin Pop
    if self.op.wait_for_sync:
4533 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
4534 6605411d Iustin Pop
      if disk_abort:
4535 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
4536 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
4537 8729e0d7 Iustin Pop
4538 8729e0d7 Iustin Pop
4539 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
4540 a8083063 Iustin Pop
  """Query runtime instance data.
4541 a8083063 Iustin Pop

4542 a8083063 Iustin Pop
  """
4543 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
4544 a987fa48 Guido Trotter
  REQ_BGL = False
4545 ae5849b5 Michael Hanselmann
4546 a987fa48 Guido Trotter
  def ExpandNames(self):
4547 a987fa48 Guido Trotter
    self.needed_locks = {}
4548 a987fa48 Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
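    # this is a pure query LU, so all locks are declared as shared (hence
    # the value 1 for every locking level above)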
4549 a987fa48 Guido Trotter
4550 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
4551 a987fa48 Guido Trotter
      raise errors.OpPrereqError("Invalid argument type 'instances'")
4552 a987fa48 Guido Trotter
4553 a987fa48 Guido Trotter
    if self.op.instances:
4554 a987fa48 Guido Trotter
      self.wanted_names = []
4555 a987fa48 Guido Trotter
      for name in self.op.instances:
4556 a987fa48 Guido Trotter
        full_name = self.cfg.ExpandInstanceName(name)
4557 a987fa48 Guido Trotter
        if full_name is None:
4558 a987fa48 Guido Trotter
          raise errors.OpPrereqError("Instance '%s' not known" %
4559 a987fa48 Guido Trotter
                                     name)
4560 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
4561 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
4562 a987fa48 Guido Trotter
    else:
4563 a987fa48 Guido Trotter
      self.wanted_names = None
4564 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
4565 a987fa48 Guido Trotter
4566 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4567 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4568 a987fa48 Guido Trotter
4569 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
4570 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
4571 a987fa48 Guido Trotter
      self._LockInstancesNodes()
4572 a8083063 Iustin Pop
4573 a8083063 Iustin Pop
  def CheckPrereq(self):
4574 a8083063 Iustin Pop
    """Check prerequisites.
4575 a8083063 Iustin Pop

4576 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
4577 a8083063 Iustin Pop

4578 a8083063 Iustin Pop
    """
4579 a987fa48 Guido Trotter
    if self.wanted_names is None:
4580 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4581 a8083063 Iustin Pop
4582 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
4583 a987fa48 Guido Trotter
                             in self.wanted_names]
4584 a987fa48 Guido Trotter
    return
4585 a8083063 Iustin Pop
4586 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
4587 a8083063 Iustin Pop
    """Compute block device status.
4588 a8083063 Iustin Pop

4589 a8083063 Iustin Pop
    """
4590 57821cac Iustin Pop
    static = self.op.static
4591 57821cac Iustin Pop
    if not static:
4592 57821cac Iustin Pop
      self.cfg.SetDiskID(dev, instance.primary_node)
4593 57821cac Iustin Pop
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
4594 57821cac Iustin Pop
    else:
4595 57821cac Iustin Pop
      dev_pstatus = None
4596 57821cac Iustin Pop
4597 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
4598 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
4599 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
4600 a8083063 Iustin Pop
        snode = dev.logical_id[1]
4601 a8083063 Iustin Pop
      else:
4602 a8083063 Iustin Pop
        snode = dev.logical_id[0]
4603 a8083063 Iustin Pop
4604 57821cac Iustin Pop
    if snode and not static:
4605 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
4606 72737a7f Iustin Pop
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
4607 a8083063 Iustin Pop
    else:
4608 a8083063 Iustin Pop
      dev_sstatus = None
4609 a8083063 Iustin Pop
4610 a8083063 Iustin Pop
    if dev.children:
4611 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
4612 a8083063 Iustin Pop
                      for child in dev.children]
4613 a8083063 Iustin Pop
    else:
4614 a8083063 Iustin Pop
      dev_children = []
4615 a8083063 Iustin Pop
4616 a8083063 Iustin Pop
    data = {
4617 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
4618 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
4619 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
4620 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
4621 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
4622 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
4623 a8083063 Iustin Pop
      "children": dev_children,
4624 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
4625 a8083063 Iustin Pop
      }
4626 a8083063 Iustin Pop
4627 a8083063 Iustin Pop
    return data
4628 a8083063 Iustin Pop
4629 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4630 a8083063 Iustin Pop
    """Gather and return data"""
4631 a8083063 Iustin Pop
    result = {}
4632 338e51e8 Iustin Pop
4633 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
4634 338e51e8 Iustin Pop
4635 a8083063 Iustin Pop
    for instance in self.wanted_instances:
4636 57821cac Iustin Pop
      if not self.op.static:
4637 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
4638 57821cac Iustin Pop
                                                  instance.name,
4639 57821cac Iustin Pop
                                                  instance.hypervisor)
4640 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
4641 57821cac Iustin Pop
          remote_state = "up"
4642 57821cac Iustin Pop
        else:
4643 57821cac Iustin Pop
          remote_state = "down"
4644 a8083063 Iustin Pop
      else:
4645 57821cac Iustin Pop
        remote_state = None
4646 a8083063 Iustin Pop
      if instance.status == "down":
4647 a8083063 Iustin Pop
        config_state = "down"
4648 a8083063 Iustin Pop
      else:
4649 a8083063 Iustin Pop
        config_state = "up"
4650 a8083063 Iustin Pop
4651 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
4652 a8083063 Iustin Pop
               for device in instance.disks]
4653 a8083063 Iustin Pop
4654 a8083063 Iustin Pop
      idict = {
4655 a8083063 Iustin Pop
        "name": instance.name,
4656 a8083063 Iustin Pop
        "config_state": config_state,
4657 a8083063 Iustin Pop
        "run_state": remote_state,
4658 a8083063 Iustin Pop
        "pnode": instance.primary_node,
4659 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
4660 a8083063 Iustin Pop
        "os": instance.os,
4661 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4662 a8083063 Iustin Pop
        "disks": disks,
4663 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
4664 24838135 Iustin Pop
        "network_port": instance.network_port,
4665 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
4666 338e51e8 Iustin Pop
        "hv_actual": cluster.FillHV(instance),
4667 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
4668 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
4669 a8083063 Iustin Pop
        }
4670 a8083063 Iustin Pop
4671 a8083063 Iustin Pop
      result[instance.name] = idict
4672 a8083063 Iustin Pop
4673 a8083063 Iustin Pop
    return result
4674 a8083063 Iustin Pop
4675 a8083063 Iustin Pop
4676 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4677 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4678 a8083063 Iustin Pop

4679 a8083063 Iustin Pop
  """
4680 a8083063 Iustin Pop
  HPATH = "instance-modify"
4681 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4682 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
4683 1a5c7281 Guido Trotter
  REQ_BGL = False
4684 1a5c7281 Guido Trotter
4685 24991749 Iustin Pop
  def CheckArguments(self):
4686 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
4687 24991749 Iustin Pop
      self.op.nics = []
4688 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
4689 24991749 Iustin Pop
      self.op.disks = []
4690 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
4691 24991749 Iustin Pop
      self.op.beparams = {}
4692 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
4693 24991749 Iustin Pop
      self.op.hvparams = {}
4694 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
4695 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
4696 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
4697 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4698 24991749 Iustin Pop
4699 24991749 Iustin Pop
    for item in (constants.BE_MEMORY, constants.BE_VCPUS):
4700 24991749 Iustin Pop
      val = self.op.beparams.get(item, None)
4701 24991749 Iustin Pop
      if val is not None:
4702 24991749 Iustin Pop
        try:
4703 24991749 Iustin Pop
          val = int(val)
4704 24991749 Iustin Pop
        except ValueError, err:
4705 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid %s size: %s" % (item, str(err)))
4706 24991749 Iustin Pop
        self.op.beparams[item] = val
4707 24991749 Iustin Pop
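    # Note on the expected format: self.op.disks and self.op.nics are lists
    # of (op, params) pairs, where op is constants.DDM_ADD,
    # constants.DDM_REMOVE or the integer index of an existing device, and
    # params is a dict of attributes to apply (e.g. a hypothetical
    # {'size': 1024, 'mode': 'rw'} for a new disk)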
    # Disk validation
4708 24991749 Iustin Pop
    disk_addremove = 0
4709 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
4710 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
4711 24991749 Iustin Pop
        disk_addremove += 1
4712 24991749 Iustin Pop
        continue
4713 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
4714 24991749 Iustin Pop
        disk_addremove += 1
4715 24991749 Iustin Pop
      else:
4716 24991749 Iustin Pop
        if not isinstance(disk_op, int):
4717 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
4718 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
4719 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
4720 24991749 Iustin Pop
        if mode not in (constants.DISK_RDONLY, constants.DISK_RDWR):
4721 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
4722 24991749 Iustin Pop
        size = disk_dict.get('size', None)
4723 24991749 Iustin Pop
        if size is None:
4724 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
4725 24991749 Iustin Pop
        try:
4726 24991749 Iustin Pop
          size = int(size)
4727 24991749 Iustin Pop
        except ValueError, err:
4728 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
4729 24991749 Iustin Pop
                                     str(err))
4730 24991749 Iustin Pop
        disk_dict['size'] = size
4731 24991749 Iustin Pop
      else:
4732 24991749 Iustin Pop
        # modification of disk
4733 24991749 Iustin Pop
        if 'size' in disk_dict:
4734 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
4735 24991749 Iustin Pop
                                     " grow-disk")
4736 24991749 Iustin Pop
4737 24991749 Iustin Pop
    if disk_addremove > 1:
4738 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
4739 24991749 Iustin Pop
                                 " supported at a time")
4740 24991749 Iustin Pop
4741 24991749 Iustin Pop
    # NIC validation
4742 24991749 Iustin Pop
    nic_addremove = 0
4743 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
4744 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
4745 24991749 Iustin Pop
        nic_addremove += 1
4746 24991749 Iustin Pop
        continue
4747 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
4748 24991749 Iustin Pop
        nic_addremove += 1
4749 24991749 Iustin Pop
      else:
4750 24991749 Iustin Pop
        if not isinstance(nic_op, int):
4751 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
4752 24991749 Iustin Pop
4753 24991749 Iustin Pop
      # nic_dict should be a dict
4754 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
4755 24991749 Iustin Pop
      if nic_ip is not None:
4756 24991749 Iustin Pop
        if nic_ip.lower() == "none":
4757 24991749 Iustin Pop
          nic_dict['ip'] = None
4758 24991749 Iustin Pop
        else:
4759 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
4760 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
4761 24991749 Iustin Pop
      # here we can only assign the default bridge if none was given
4762 24991749 Iustin Pop
      nic_bridge = nic_dict.get('bridge', None)
4763 24991749 Iustin Pop
      if nic_bridge is None:
4764 24991749 Iustin Pop
        nic_dict['bridge'] = self.cfg.GetDefBridge()
4765 24991749 Iustin Pop
      # but we can validate MACs
4766 24991749 Iustin Pop
      nic_mac = nic_dict.get('mac', None)
4767 24991749 Iustin Pop
      if nic_mac is not None:
4768 24991749 Iustin Pop
        if self.cfg.IsMacInUse(nic_mac):
4769 24991749 Iustin Pop
          raise errors.OpPrereqError("MAC address %s already in use"
4770 24991749 Iustin Pop
                                     " in cluster" % nic_mac)
4771 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4772 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
4773 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
4774 24991749 Iustin Pop
    if nic_addremove > 1:
4775 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
4776 24991749 Iustin Pop
                                 " supported at a time")
4777 24991749 Iustin Pop
4778 1a5c7281 Guido Trotter
  def ExpandNames(self):
4779 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
4780 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
4781 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4782 74409b12 Iustin Pop
4783 74409b12 Iustin Pop
  def DeclareLocks(self, level):
4784 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
4785 74409b12 Iustin Pop
      self._LockInstancesNodes()
4786 a8083063 Iustin Pop
4787 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4788 a8083063 Iustin Pop
    """Build hooks env.
4789 a8083063 Iustin Pop

4790 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4791 a8083063 Iustin Pop

4792 a8083063 Iustin Pop
    """
4793 396e1b78 Michael Hanselmann
    args = dict()
4794 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
4795 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
4796 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
4797 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
4798 24991749 Iustin Pop
    # FIXME: re-add disk/nic changes
4799 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
4800 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(),
4801 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4802 a8083063 Iustin Pop
    return env, nl, nl
4803 a8083063 Iustin Pop
4804 a8083063 Iustin Pop
  def CheckPrereq(self):
4805 a8083063 Iustin Pop
    """Check prerequisites.
4806 a8083063 Iustin Pop

4807 a8083063 Iustin Pop
    This checks that the instance exists and that the requested parameter
    changes are valid for it and for the involved nodes.
4808 a8083063 Iustin Pop

4809 a8083063 Iustin Pop
    """
4810 24991749 Iustin Pop
    force = self.force = self.op.force
4811 a8083063 Iustin Pop
4812 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
4813 31a853d2 Iustin Pop
4814 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4815 1a5c7281 Guido Trotter
    assert self.instance is not None, \
4816 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4817 74409b12 Iustin Pop
    pnode = self.instance.primary_node
4818 74409b12 Iustin Pop
    nodelist = [pnode]
4819 74409b12 Iustin Pop
    nodelist.extend(instance.secondary_nodes)
4820 74409b12 Iustin Pop
4821 338e51e8 Iustin Pop
    # hvparams processing
4822 74409b12 Iustin Pop
    if self.op.hvparams:
4823 74409b12 Iustin Pop
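      # a value of None removes the key from the per-instance dict, so the
      # cluster-level default applies again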
      i_hvdict = copy.deepcopy(instance.hvparams)
4824 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
4825 74409b12 Iustin Pop
        if val is None:
4826 74409b12 Iustin Pop
          try:
4827 74409b12 Iustin Pop
            del i_hvdict[key]
4828 74409b12 Iustin Pop
          except KeyError:
4829 74409b12 Iustin Pop
            pass
4830 74409b12 Iustin Pop
        else:
4831 74409b12 Iustin Pop
          i_hvdict[key] = val
4832 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4833 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
4834 74409b12 Iustin Pop
                                i_hvdict)
4835 74409b12 Iustin Pop
      # local check
4836 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
4837 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
4838 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
4839 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
4840 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
4841 338e51e8 Iustin Pop
    else:
4842 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
4843 338e51e8 Iustin Pop
4844 338e51e8 Iustin Pop
    # beparams processing
4845 338e51e8 Iustin Pop
    if self.op.beparams:
4846 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
4847 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
4848 338e51e8 Iustin Pop
        if val is None:
4849 338e51e8 Iustin Pop
          try:
4850 338e51e8 Iustin Pop
            del i_bedict[key]
4851 338e51e8 Iustin Pop
          except KeyError:
4852 338e51e8 Iustin Pop
            pass
4853 338e51e8 Iustin Pop
        else:
4854 338e51e8 Iustin Pop
          i_bedict[key] = val
4855 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4856 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4857 338e51e8 Iustin Pop
                                i_bedict)
4858 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
4859 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
4860 338e51e8 Iustin Pop
    else:
4861 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
4862 74409b12 Iustin Pop
4863 cfefe007 Guido Trotter
    self.warn = []
4864 647a5d80 Iustin Pop
4865 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
4866 647a5d80 Iustin Pop
      mem_check_list = [pnode]
4867 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4868 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
4869 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
4870 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
4871 72737a7f Iustin Pop
                                                  instance.hypervisor)
4872 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
4873 72737a7f Iustin Pop
                                         instance.hypervisor)
4874 cfefe007 Guido Trotter
4875 cfefe007 Guido Trotter
      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
4876 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
4877 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
4878 cfefe007 Guido Trotter
      else:
4879 cfefe007 Guido Trotter
        if instance_info:
4880 cfefe007 Guido Trotter
          current_mem = instance_info['memory']
4881 cfefe007 Guido Trotter
        else:
4882 cfefe007 Guido Trotter
          # Assume instance not running
4883 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
4884 cfefe007 Guido Trotter
          # and we have no other way to check)
4885 cfefe007 Guido Trotter
          current_mem = 0
4886 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
4887 338e51e8 Iustin Pop
                    nodeinfo[pnode]['memory_free'])
4888 cfefe007 Guido Trotter
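        # illustrative numbers only: growing BE_MEMORY from 512 to 2048 MB
        # with the instance currently using 512 MB and the node having
        # 1024 MB free gives miss_mem = 2048 - 512 - 1024 = 512 > 0, so the
        # change is refused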
        if miss_mem > 0:
4889 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
4890 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
4891 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
4892 cfefe007 Guido Trotter
4893 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4894 647a5d80 Iustin Pop
        for node in instance.secondary_nodes:
4895 647a5d80 Iustin Pop
          if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
4896 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
4897 647a5d80 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nodeinfo[node]['memory_free']:
4898 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
4899 647a5d80 Iustin Pop
                             " secondary node %s" % node)
4900 5bc84f33 Alexander Schreiber
4901 24991749 Iustin Pop
    # NIC processing
4902 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
4903 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
4904 24991749 Iustin Pop
        if not instance.nics:
4905 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
4906 24991749 Iustin Pop
        continue
4907 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
4908 24991749 Iustin Pop
        # an existing nic
4909 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
4910 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
4911 24991749 Iustin Pop
                                     " are 0 to %d" %
4912 24991749 Iustin Pop
                                     (nic_op, len(instance.nics) - 1)))
4913 24991749 Iustin Pop
      nic_bridge = nic_dict.get('bridge', None)
4914 24991749 Iustin Pop
      if nic_bridge is not None:
4915 24991749 Iustin Pop
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
4916 24991749 Iustin Pop
          msg = ("Bridge '%s' doesn't exist on one of"
4917 24991749 Iustin Pop
                 " the instance nodes" % nic_bridge)
4918 24991749 Iustin Pop
          if self.force:
4919 24991749 Iustin Pop
            self.warn.append(msg)
4920 24991749 Iustin Pop
          else:
4921 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
4922 24991749 Iustin Pop
4923 24991749 Iustin Pop
    # DISK processing
4924 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
4925 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
4926 24991749 Iustin Pop
                                 " diskless instances")
4927 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
4928 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
4929 24991749 Iustin Pop
        if len(instance.disks) == 1:
4930 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
4931 24991749 Iustin Pop
                                     " an instance")
4932 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
4933 24991749 Iustin Pop
        ins_l = ins_l[pnode]
4934 24991749 Iustin Pop
        if not isinstance(ins_l, list):
4935 24991749 Iustin Pop
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
4936 24991749 Iustin Pop
        if instance.name in ins_l:
4937 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
4938 24991749 Iustin Pop
                                     " disks.")
4939 24991749 Iustin Pop
4940 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
4941 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
4942 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
4943 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
4944 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
4945 24991749 Iustin Pop
        # an existing disk
4946 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
4947 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
4948 24991749 Iustin Pop
                                     " are 0 to %d" %
4949 24991749 Iustin Pop
                                     (disk_op, len(instance.disks) - 1)))
4950 24991749 Iustin Pop
4951 a8083063 Iustin Pop
    return
4952 a8083063 Iustin Pop
4953 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4954 a8083063 Iustin Pop
    """Modifies an instance.
4955 a8083063 Iustin Pop

4956 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4957 24991749 Iustin Pop

4958 a8083063 Iustin Pop
    """
4959 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
4960 cfefe007 Guido Trotter
    # feedback_fn there.
4961 cfefe007 Guido Trotter
    for warn in self.warn:
4962 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
4963 cfefe007 Guido Trotter
4964 a8083063 Iustin Pop
    result = []
4965 a8083063 Iustin Pop
    instance = self.instance
4966 24991749 Iustin Pop
    # disk changes
4967 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
4968 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
4969 24991749 Iustin Pop
        # remove the last disk
4970 24991749 Iustin Pop
        device = instance.disks.pop()
4971 24991749 Iustin Pop
        device_idx = len(instance.disks)
4972 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
4973 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
4974 24991749 Iustin Pop
          if not self.rpc.call_blockdev_remove(node, disk):
4975 24991749 Iustin Pop
            self.proc.LogWarning("Could not remove disk/%d on node %s,"
4976 24991749 Iustin Pop
                                 " continuing anyway", device_idx, node)
4977 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
4978 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
4979 24991749 Iustin Pop
        # add a new disk
4980 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
4981 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
4982 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
4983 24991749 Iustin Pop
        else:
4984 24991749 Iustin Pop
          file_driver = file_path = None
4985 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
4986 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
4987 24991749 Iustin Pop
                                         instance.disk_template,
4988 24991749 Iustin Pop
                                         instance, instance.primary_node,
4989 24991749 Iustin Pop
                                         instance.secondary_nodes,
4990 24991749 Iustin Pop
                                         [disk_dict],
4991 24991749 Iustin Pop
                                         file_path,
4992 24991749 Iustin Pop
                                         file_driver,
4993 24991749 Iustin Pop
                                         disk_idx_base)[0]
4994 24991749 Iustin Pop
        new_disk.mode = disk_dict['mode']
4995 24991749 Iustin Pop
        instance.disks.append(new_disk)
4996 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
4997 24991749 Iustin Pop
4998 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
4999 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
5000 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
5001 24991749 Iustin Pop
        #HARDCODE
5002 24991749 Iustin Pop
        for secondary_node in instance.secondary_nodes:
5003 24991749 Iustin Pop
          if not _CreateBlockDevOnSecondary(self, secondary_node, instance,
5004 24991749 Iustin Pop
                                            new_disk, False, info):
5005 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
5006 24991749 Iustin Pop
                            " secondary node %s!",
5007 24991749 Iustin Pop
                            new_disk.iv_name, new_disk, secondary_node)
5008 24991749 Iustin Pop
        #HARDCODE
5009 24991749 Iustin Pop
        if not _CreateBlockDevOnPrimary(self, instance.primary_node,
5010 24991749 Iustin Pop
                                        instance, new_disk, info):
5011 24991749 Iustin Pop
          self.LogWarning("Failed to create volume %s on primary!",
5012 24991749 Iustin Pop
                          new_disk.iv_name)
5013 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
5014 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
5015 24991749 Iustin Pop
      else:
5016 24991749 Iustin Pop
        # change a given disk
5017 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
5018 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
5019 24991749 Iustin Pop
    # NIC changes
5020 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5021 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5022 24991749 Iustin Pop
        # remove the last nic
5023 24991749 Iustin Pop
        del instance.nics[-1]
5024 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
5025 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
5026 24991749 Iustin Pop
        # add a new nic
5027 24991749 Iustin Pop
        if 'mac' not in nic_dict:
5028 24991749 Iustin Pop
          mac = constants.VALUE_GENERATE
5029 24991749 Iustin Pop
        else:
5030 24991749 Iustin Pop
          mac = nic_dict['mac']
5031 24991749 Iustin Pop
        if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5032 24991749 Iustin Pop
          mac = self.cfg.GenerateMAC()
5033 24991749 Iustin Pop
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
5034 24991749 Iustin Pop
                              bridge=nic_dict.get('bridge', None))
5035 24991749 Iustin Pop
        instance.nics.append(new_nic)
5036 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
5037 24991749 Iustin Pop
                       "add:mac=%s,ip=%s,bridge=%s" %
5038 24991749 Iustin Pop
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
5039 24991749 Iustin Pop
      else:
5040 24991749 Iustin Pop
        # change a given nic
5041 24991749 Iustin Pop
        for key in 'mac', 'ip', 'bridge':
5042 24991749 Iustin Pop
          if key in nic_dict:
5043 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
5044 24991749 Iustin Pop
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
5045 24991749 Iustin Pop
5046 24991749 Iustin Pop
    # hvparams changes
5047 74409b12 Iustin Pop
    if self.op.hvparams:
5048 74409b12 Iustin Pop
      instance.hvparams = self.hv_new
5049 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
5050 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
5051 24991749 Iustin Pop
5052 24991749 Iustin Pop
    # beparams changes
5053 338e51e8 Iustin Pop
    if self.op.beparams:
5054 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
5055 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
5056 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
5057 a8083063 Iustin Pop
5058 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
5059 a8083063 Iustin Pop
5060 a8083063 Iustin Pop
    return result
5061 a8083063 Iustin Pop
5062 a8083063 Iustin Pop
5063 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
5064 a8083063 Iustin Pop
  """Query the exports list
5065 a8083063 Iustin Pop

5066 a8083063 Iustin Pop
  """
5067 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
5068 21a15682 Guido Trotter
  REQ_BGL = False
5069 21a15682 Guido Trotter
5070 21a15682 Guido Trotter
  def ExpandNames(self):
5071 21a15682 Guido Trotter
    self.needed_locks = {}
5072 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
5073 21a15682 Guido Trotter
    if not self.op.nodes:
5074 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5075 21a15682 Guido Trotter
    else:
5076 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
5077 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
5078 a8083063 Iustin Pop
5079 a8083063 Iustin Pop
  def CheckPrereq(self):
5080 21a15682 Guido Trotter
    """Check prerequisites.
5081 a8083063 Iustin Pop

5082 a8083063 Iustin Pop
    """
5083 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
5084 a8083063 Iustin Pop
5085 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5086 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
5087 a8083063 Iustin Pop

5088 e4376078 Iustin Pop
    @rtype: dict
5089 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
5090 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
5091 e4376078 Iustin Pop
        that node.
5092 a8083063 Iustin Pop

5093 a8083063 Iustin Pop
    """
5094 72737a7f Iustin Pop
    return self.rpc.call_export_list(self.nodes)
5095 a8083063 Iustin Pop
5096 a8083063 Iustin Pop
5097 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
5098 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
5099 a8083063 Iustin Pop

5100 a8083063 Iustin Pop
  """
5101 a8083063 Iustin Pop
  HPATH = "instance-export"
5102 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5103 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
5104 6657590e Guido Trotter
  REQ_BGL = False
5105 6657590e Guido Trotter
5106 6657590e Guido Trotter
  def ExpandNames(self):
5107 6657590e Guido Trotter
    self._ExpandAndLockInstance()
5108 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
5109 6657590e Guido Trotter
    #
5110 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
5111 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
5112 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
5113 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
5114 6657590e Guido Trotter
    #    then one to remove, afterwards
5115 6657590e Guido Trotter
    #  - removing the removal operation altogether
5116 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5117 6657590e Guido Trotter
5118 6657590e Guido Trotter
  def DeclareLocks(self, level):
5119 6657590e Guido Trotter
    """Last minute lock declaration."""
5120 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
5121 a8083063 Iustin Pop
5122 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5123 a8083063 Iustin Pop
    """Build hooks env.
5124 a8083063 Iustin Pop

5125 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
5126 a8083063 Iustin Pop

5127 a8083063 Iustin Pop
    """
5128 a8083063 Iustin Pop
    env = {
5129 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
5130 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
5131 a8083063 Iustin Pop
      }
5132 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5133 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
5134 a8083063 Iustin Pop
          self.op.target_node]
5135 a8083063 Iustin Pop
    return env, nl, nl
5136 a8083063 Iustin Pop
5137 a8083063 Iustin Pop
  def CheckPrereq(self):
5138 a8083063 Iustin Pop
    """Check prerequisites.
5139 a8083063 Iustin Pop

5140 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
5141 a8083063 Iustin Pop

5142 a8083063 Iustin Pop
    """
5143 6657590e Guido Trotter
    instance_name = self.op.instance_name
5144 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
5145 6657590e Guido Trotter
    assert self.instance is not None, \
5146 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
5147 a8083063 Iustin Pop
5148 6657590e Guido Trotter
    self.dst_node = self.cfg.GetNodeInfo(
5149 6657590e Guido Trotter
      self.cfg.ExpandNodeName(self.op.target_node))
5150 a8083063 Iustin Pop
5151 268b8e42 Iustin Pop
    if self.dst_node is None:
5152 268b8e42 Iustin Pop
      # This is a wrong node name, not a non-locked node
5153 268b8e42 Iustin Pop
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
5154 a8083063 Iustin Pop
5155 b6023d6c Manuel Franceschini
    # instance disk type verification
5156 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
5157 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
5158 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
5159 b6023d6c Manuel Franceschini
                                   " file-based disks")
5160 b6023d6c Manuel Franceschini
5161 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5162 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
5163 a8083063 Iustin Pop

5164 a8083063 Iustin Pop
    """
5165 a8083063 Iustin Pop
    instance = self.instance
5166 a8083063 Iustin Pop
    dst_node = self.dst_node
5167 a8083063 Iustin Pop
    src_node = instance.primary_node
5168 a8083063 Iustin Pop
    if self.op.shutdown:
5169 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
5170 72737a7f Iustin Pop
      if not self.rpc.call_instance_shutdown(src_node, instance):
5171 38206f3c Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
5172 38206f3c Iustin Pop
                                 (instance.name, src_node))
5173 a8083063 Iustin Pop
5174 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
5175 a8083063 Iustin Pop
5176 a8083063 Iustin Pop
    snap_disks = []
5177 a8083063 Iustin Pop
5178 a8083063 Iustin Pop
    try:
5179 a8083063 Iustin Pop
      for disk in instance.disks:
5180 19d7f90a Guido Trotter
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
5181 19d7f90a Guido Trotter
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
5182 a8083063 Iustin Pop
5183 19d7f90a Guido Trotter
        if not new_dev_name:
5184 19d7f90a Guido Trotter
          self.LogWarning("Could not snapshot block device %s on node %s",
5185 9a4f63d1 Iustin Pop
                          disk.logical_id[1], src_node)
5186 19d7f90a Guido Trotter
          snap_disks.append(False)
5187 19d7f90a Guido Trotter
        else:
5188 19d7f90a Guido Trotter
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
5189 19d7f90a Guido Trotter
                                 logical_id=(vgname, new_dev_name),
5190 19d7f90a Guido Trotter
                                 physical_id=(vgname, new_dev_name),
5191 19d7f90a Guido Trotter
                                 iv_name=disk.iv_name)
5192 19d7f90a Guido Trotter
          snap_disks.append(new_dev)
5193 a8083063 Iustin Pop
5194 a8083063 Iustin Pop
    finally:
5195 fb300fb7 Guido Trotter
      if self.op.shutdown and instance.status == "up":
5196 72737a7f Iustin Pop
        if not self.rpc.call_instance_start(src_node, instance, None):
5197 b9bddb6b Iustin Pop
          _ShutdownInstanceDisks(self, instance)
5198 fb300fb7 Guido Trotter
          raise errors.OpExecError("Could not start instance")
5199 a8083063 Iustin Pop
5200 a8083063 Iustin Pop
    # TODO: check for size
5201 a8083063 Iustin Pop
5202 62c9ec92 Iustin Pop
    cluster_name = self.cfg.GetClusterName()
5203 74c47259 Iustin Pop
    for idx, dev in enumerate(snap_disks):
5204 19d7f90a Guido Trotter
      if dev:
5205 19d7f90a Guido Trotter
        if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
5206 74c47259 Iustin Pop
                                             instance, cluster_name, idx):
5207 19d7f90a Guido Trotter
          self.LogWarning("Could not export block device %s from node %s to"
5208 19d7f90a Guido Trotter
                          " node %s", dev.logical_id[1], src_node,
5209 19d7f90a Guido Trotter
                          dst_node.name)
5210 19d7f90a Guido Trotter
        if not self.rpc.call_blockdev_remove(src_node, dev):
5211 19d7f90a Guido Trotter
          self.LogWarning("Could not remove snapshot block device %s from node"
5212 19d7f90a Guido Trotter
                          " %s", dev.logical_id[1], src_node)
5213 a8083063 Iustin Pop
5214 72737a7f Iustin Pop
    if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks):
5215 19d7f90a Guido Trotter
      self.LogWarning("Could not finalize export for instance %s on node %s",
5216 19d7f90a Guido Trotter
                      instance.name, dst_node.name)
5217 a8083063 Iustin Pop
5218 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
5219 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
5220 a8083063 Iustin Pop
5221 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
5222 a8083063 Iustin Pop
    # if we proceed, the backup would be removed because OpQueryExports
5223 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
5224 a8083063 Iustin Pop
    if nodelist:
5225 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
5226 a8083063 Iustin Pop
      for node in exportlist:
5227 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
5228 72737a7f Iustin Pop
          if not self.rpc.call_export_remove(node, instance.name):
5229 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
5230 19d7f90a Guido Trotter
                            " on node %s", instance.name, node)
5231 5c947f38 Iustin Pop
5232 5c947f38 Iustin Pop
5233 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
5234 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
5235 9ac99fda Guido Trotter

5236 9ac99fda Guido Trotter
  """
5237 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
5238 3656b3af Guido Trotter
  REQ_BGL = False
5239 3656b3af Guido Trotter
5240 3656b3af Guido Trotter
  def ExpandNames(self):
5241 3656b3af Guido Trotter
    self.needed_locks = {}
5242 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
5243 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
5244 3656b3af Guido Trotter
    # we can remove exports also for a removed instance)
5245 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5246 9ac99fda Guido Trotter
5247 9ac99fda Guido Trotter
  def CheckPrereq(self):
5248 9ac99fda Guido Trotter
    """Check prerequisites.
5249 9ac99fda Guido Trotter
    """
5250 9ac99fda Guido Trotter
    pass
5251 9ac99fda Guido Trotter
5252 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
5253 9ac99fda Guido Trotter
    """Remove any export.
5254 9ac99fda Guido Trotter

5255 9ac99fda Guido Trotter
    """
5256 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
5257 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
5258 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
5259 9ac99fda Guido Trotter
    fqdn_warn = False
5260 9ac99fda Guido Trotter
    if not instance_name:
5261 9ac99fda Guido Trotter
      fqdn_warn = True
5262 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
5263 9ac99fda Guido Trotter
5264 72737a7f Iustin Pop
    exportlist = self.rpc.call_export_list(self.acquired_locks[
5265 72737a7f Iustin Pop
      locking.LEVEL_NODE])
5266 9ac99fda Guido Trotter
    found = False
5267 9ac99fda Guido Trotter
    for node in exportlist:
5268 9ac99fda Guido Trotter
      if instance_name in exportlist[node]:
5269 9ac99fda Guido Trotter
        found = True
5270 72737a7f Iustin Pop
        if not self.rpc.call_export_remove(node, instance_name):
5271 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
5272 9a4f63d1 Iustin Pop
                        " on node %s", instance_name, node)
5273 9ac99fda Guido Trotter
5274 9ac99fda Guido Trotter
    if fqdn_warn and not found:
5275 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
5276 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
5277 9ac99fda Guido Trotter
                  " Domain Name.")
5278 9ac99fda Guido Trotter
5279 9ac99fda Guido Trotter
5280 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
5281 5c947f38 Iustin Pop
  """Generic tags LU.
5282 5c947f38 Iustin Pop

5283 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
5284 5c947f38 Iustin Pop

5285 5c947f38 Iustin Pop
  """
5286 5c947f38 Iustin Pop
5287 8646adce Guido Trotter
  def ExpandNames(self):
5288 8646adce Guido Trotter
    self.needed_locks = {}
5289 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
5290 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
5291 5c947f38 Iustin Pop
      if name is None:
5292 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
5293 3ecf6786 Iustin Pop
                                   (self.op.name,))
5294 5c947f38 Iustin Pop
      self.op.name = name
5295 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = name
5296 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
5297 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
5298 5c947f38 Iustin Pop
      if name is None:
5299 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
5300 3ecf6786 Iustin Pop
                                   (self.op.name,))
5301 5c947f38 Iustin Pop
      self.op.name = name
5302 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = name
5303 8646adce Guido Trotter
5304 8646adce Guido Trotter
  def CheckPrereq(self):
5305 8646adce Guido Trotter
    """Check prerequisites.
5306 8646adce Guido Trotter

5307 8646adce Guido Trotter
    """
5308 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
5309 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
5310 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
5311 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
5312 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
5313 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
5314 5c947f38 Iustin Pop
    else:
5315 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
5316 3ecf6786 Iustin Pop
                                 str(self.op.kind))
5317 5c947f38 Iustin Pop
5318 5c947f38 Iustin Pop
5319 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
5320 5c947f38 Iustin Pop
  """Returns the tags of a given object.
5321 5c947f38 Iustin Pop

5322 5c947f38 Iustin Pop
  """
5323 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
5324 8646adce Guido Trotter
  REQ_BGL = False
5325 5c947f38 Iustin Pop
5326 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5327 5c947f38 Iustin Pop
    """Returns the tag list.
5328 5c947f38 Iustin Pop

5329 5c947f38 Iustin Pop
    """
5330 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
5331 5c947f38 Iustin Pop
5332 5c947f38 Iustin Pop
5333 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
5334 73415719 Iustin Pop
  """Searches the tags for a given pattern.
5335 73415719 Iustin Pop

5336 73415719 Iustin Pop
  """
5337 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
5338 8646adce Guido Trotter
  REQ_BGL = False
5339 8646adce Guido Trotter
5340 8646adce Guido Trotter
  def ExpandNames(self):
5341 8646adce Guido Trotter
    self.needed_locks = {}
5342 73415719 Iustin Pop
5343 73415719 Iustin Pop
  def CheckPrereq(self):
5344 73415719 Iustin Pop
    """Check prerequisites.
5345 73415719 Iustin Pop

5346 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
5347 73415719 Iustin Pop

5348 73415719 Iustin Pop
    """
5349 73415719 Iustin Pop
    try:
5350 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
5351 73415719 Iustin Pop
    except re.error, err:
5352 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
5353 73415719 Iustin Pop
                                 (self.op.pattern, err))
5354 73415719 Iustin Pop
5355 73415719 Iustin Pop
  def Exec(self, feedback_fn):
5356 73415719 Iustin Pop
    """Returns the tag list.
5357 73415719 Iustin Pop

5358 73415719 Iustin Pop
    """
5359 73415719 Iustin Pop
    cfg = self.cfg
5360 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
5361 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
5362 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
5363 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
5364 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
5365 73415719 Iustin Pop
    results = []
5366 73415719 Iustin Pop
    for path, target in tgts:
5367 73415719 Iustin Pop
      for tag in target.GetTags():
5368 73415719 Iustin Pop
        if self.re.search(tag):
5369 73415719 Iustin Pop
          results.append((path, tag))
5370 73415719 Iustin Pop
    return results
5371 73415719 Iustin Pop
5372 73415719 Iustin Pop
5373 f27302fa Iustin Pop
class LUAddTags(TagsLU):
5374 5c947f38 Iustin Pop
  """Sets a tag on a given object.
5375 5c947f38 Iustin Pop

5376 5c947f38 Iustin Pop
  """
5377 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5378 8646adce Guido Trotter
  REQ_BGL = False
5379 5c947f38 Iustin Pop
5380 5c947f38 Iustin Pop
  def CheckPrereq(self):
5381 5c947f38 Iustin Pop
    """Check prerequisites.
5382 5c947f38 Iustin Pop

5383 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
5384 5c947f38 Iustin Pop

5385 5c947f38 Iustin Pop
    """
5386 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5387 f27302fa Iustin Pop
    for tag in self.op.tags:
5388 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5389 5c947f38 Iustin Pop
5390 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5391 5c947f38 Iustin Pop
    """Sets the tag.
5392 5c947f38 Iustin Pop

5393 5c947f38 Iustin Pop
    """
5394 5c947f38 Iustin Pop
    try:
5395 f27302fa Iustin Pop
      for tag in self.op.tags:
5396 f27302fa Iustin Pop
        self.target.AddTag(tag)
5397 5c947f38 Iustin Pop
    except errors.TagError, err:
5398 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
5399 5c947f38 Iustin Pop
    try:
5400 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5401 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5402 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5403 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5404 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5405 5c947f38 Iustin Pop
5406 5c947f38 Iustin Pop
5407 f27302fa Iustin Pop
class LUDelTags(TagsLU):
5408 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
5409 5c947f38 Iustin Pop

5410 5c947f38 Iustin Pop
  """
5411 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5412 8646adce Guido Trotter
  REQ_BGL = False
5413 5c947f38 Iustin Pop
5414 5c947f38 Iustin Pop
  def CheckPrereq(self):
5415 5c947f38 Iustin Pop
    """Check prerequisites.
5416 5c947f38 Iustin Pop

5417 5c947f38 Iustin Pop
    This checks that we have the given tag.
5418 5c947f38 Iustin Pop

5419 5c947f38 Iustin Pop
    """
5420 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5421 f27302fa Iustin Pop
    for tag in self.op.tags:
5422 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5423 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
5424 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
5425 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
5426 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
5427 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
5428 f27302fa Iustin Pop
      diff_names.sort()
5429 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
5430 f27302fa Iustin Pop
                                 (",".join(diff_names)))
5431 5c947f38 Iustin Pop
5432 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5433 5c947f38 Iustin Pop
    """Remove the tag from the object.
5434 5c947f38 Iustin Pop

5435 5c947f38 Iustin Pop
    """
5436 f27302fa Iustin Pop
    for tag in self.op.tags:
5437 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
5438 5c947f38 Iustin Pop
    try:
5439 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5440 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5441 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5442 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5443 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5444 06009e27 Iustin Pop
5445 0eed6e61 Guido Trotter
5446 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
5447 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
5448 06009e27 Iustin Pop

5449 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
5450 06009e27 Iustin Pop
  time.
5451 06009e27 Iustin Pop

5452 06009e27 Iustin Pop
  """
5453 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
5454 fbe9022f Guido Trotter
  REQ_BGL = False
5455 06009e27 Iustin Pop
5456 fbe9022f Guido Trotter
  def ExpandNames(self):
5457 fbe9022f Guido Trotter
    """Expand names and set required locks.
5458 06009e27 Iustin Pop

5459 fbe9022f Guido Trotter
    This expands the node list, if any.
5460 06009e27 Iustin Pop

5461 06009e27 Iustin Pop
    """
5462 fbe9022f Guido Trotter
    self.needed_locks = {}
5463 06009e27 Iustin Pop
    if self.op.on_nodes:
5464 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
5465 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
5466 fbe9022f Guido Trotter
      # more information.
5467 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
5468 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
5469 fbe9022f Guido Trotter
5470 fbe9022f Guido Trotter
  def CheckPrereq(self):
5471 fbe9022f Guido Trotter
    """Check prerequisites.
5472 fbe9022f Guido Trotter

5473 fbe9022f Guido Trotter
    """
5474 06009e27 Iustin Pop
5475 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
5476 06009e27 Iustin Pop
    """Do the actual sleep.
5477 06009e27 Iustin Pop

5478 06009e27 Iustin Pop
    """
5479 06009e27 Iustin Pop
    if self.op.on_master:
5480 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
5481 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
5482 06009e27 Iustin Pop
    if self.op.on_nodes:
5483 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
5484 06009e27 Iustin Pop
      if not result:
5485 06009e27 Iustin Pop
        raise errors.OpExecError("Complete failure from rpc call")
5486 06009e27 Iustin Pop
      for node, node_result in result.items():
5487 06009e27 Iustin Pop
        if not node_result:
5488 06009e27 Iustin Pop
          raise errors.OpExecError("Failure during rpc call to node %s,"
5489 06009e27 Iustin Pop
                                   " result: %s" % (node, node_result))
5490 d61df03e Iustin Pop
5491 d61df03e Iustin Pop
5492 d1c2dd75 Iustin Pop
class IAllocator(object):
5493 d1c2dd75 Iustin Pop
  """IAllocator framework.
5494 d61df03e Iustin Pop

5495 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
5496 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
5497 d1c2dd75 Iustin Pop
    - input data (all members of the mode-specific _ALLO_KEYS or _RELO_KEYS
      class attribute are required)
5498 d1c2dd75 Iustin Pop
    - four buffer attributes (in/out_data, in/out_text) that represent the
5499 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
5500 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
5501 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
5502 d1c2dd75 Iustin Pop
      easy usage
5503 d61df03e Iustin Pop

5504 d61df03e Iustin Pop
  """
5505 29859cb7 Iustin Pop
  _ALLO_KEYS = [
5506 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
5507 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
5508 d1c2dd75 Iustin Pop
    ]
5509 29859cb7 Iustin Pop
  _RELO_KEYS = [
5510 29859cb7 Iustin Pop
    "relocate_from",
5511 29859cb7 Iustin Pop
    ]
5512 d1c2dd75 Iustin Pop
5513 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
5514 72737a7f Iustin Pop
    self.lu = lu
5515 d1c2dd75 Iustin Pop
    # init buffer variables
5516 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
5517 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
5518 29859cb7 Iustin Pop
    self.mode = mode
5519 29859cb7 Iustin Pop
    self.name = name
5520 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
5521 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
5522 29859cb7 Iustin Pop
    self.relocate_from = None
5523 27579978 Iustin Pop
    # computed fields
5524 27579978 Iustin Pop
    self.required_nodes = None
5525 d1c2dd75 Iustin Pop
    # init result fields
5526 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
5527 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5528 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
5529 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
5530 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
5531 29859cb7 Iustin Pop
    else:
5532 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
5533 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
5534 d1c2dd75 Iustin Pop
    for key in kwargs:
5535 29859cb7 Iustin Pop
      if key not in keyset:
5536 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
5537 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5538 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
5539 29859cb7 Iustin Pop
    for key in keyset:
5540 d1c2dd75 Iustin Pop
      if key not in kwargs:
5541 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
5542 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5543 d1c2dd75 Iustin Pop
    self._BuildInputData()
5544 d1c2dd75 Iustin Pop
5545 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": 1,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname in node_list:
      ninfo = cfg.GetNodeInfo(nname)
      if nname not in node_data or not isinstance(node_data[nname], dict):
        raise errors.OpExecError("Can't get data for node %s" % nname)
      remote_info = node_data[nname]
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
                   'vg_size', 'vg_free', 'cpu_total']:
        if attr not in remote_info:
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
                                   (nname, attr))
        try:
          remote_info[attr] = int(remote_info[attr])
        except ValueError, err:
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
                                   " %s" % (nname, attr, str(err)))
      # compute memory used by primary instances
      i_p_mem = i_p_up_mem = 0
      for iinfo, beinfo in i_list:
        if iinfo.primary_node == nname:
          i_p_mem += beinfo[constants.BE_MEMORY]
          if iinfo.name not in node_iinfo[nname]:
            i_used_mem = 0
          else:
            i_used_mem = int(node_iinfo[nname][iinfo.name]['memory'])
          i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
          remote_info['memory_free'] -= max(0, i_mem_diff)

          if iinfo.status == "up":
            i_p_up_mem += beinfo[constants.BE_MEMORY]

      # compute memory used by instances
      pnr = {
        "tags": list(ninfo.GetTags()),
        "total_memory": remote_info['memory_total'],
        "reserved_memory": remote_info['memory_dom0'],
        "free_memory": remote_info['memory_free'],
        "i_pri_memory": i_p_mem,
        "i_pri_up_memory": i_p_up_mem,
        "total_disk": remote_info['vg_size'],
        "free_disk": remote_info['vg_free'],
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "total_cpus": remote_info['cpu_total'],
        }
      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "should_run": iinfo.status == "up",
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data
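
  # For orientation, the structure assembled above has roughly this shape;
  # all concrete values below are invented for illustration, the real ones
  # come from the cluster configuration and the node RPC calls:
  #
  #   {
  #     "version": 1,
  #     "cluster_name": "cluster.example.com",
  #     "cluster_tags": [],
  #     "enable_hypervisors": ["xen-pvm"],
  #     "nodes": {
  #       "node1.example.com": {
  #         "total_memory": 4096, "reserved_memory": 512, "free_memory": 3072,
  #         "total_disk": 102400, "free_disk": 51200, "total_cpus": 4,
  #         ...},
  #       },
  #     "instances": {
  #       "inst1.example.com": {
  #         "memory": 512, "vcpus": 1, "disk_template": "drbd", ...},
  #       },
  #     }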

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data
    if len(self.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner
    data = self.in_text

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)

    if not isinstance(result, (list, tuple)) or len(result) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()
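
  # Sketch of the typical caller pattern (illustrative; the opcode field name
  # self.op.iallocator is an assumption about the calling LU): build the
  # object, run the named allocator, then inspect the result fields that
  # _ValidateResult fills in:
  #
  #   ial.Run(self.op.iallocator)
  #   if not ial.success:
  #     raise errors.OpPrereqError("Can't compute nodes using iallocator:"
  #                                " %s" % ial.info)
  #   selected_nodes = ial.nodes[:ial.required_nodes]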

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
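
  # A reply that passes the checks above looks like this (example values
  # only); "nodes" must be a list, and "success"/"info" are stored verbatim:
  #
  #   {"success": true, "info": "allocation successful",
  #    "nodes": ["node1.example.com", "node2.example.com"]}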


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode of
    the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result
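
  # Usage note (sketch): with direction=constants.IALLOCATOR_DIR_IN the LU
  # only returns the generated input text, which is useful for inspecting
  # what would be fed to an allocator; with constants.IALLOCATOR_DIR_OUT it
  # runs the allocator named in self.op.allocator and returns its raw,
  # unvalidated output.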