Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 16714921

History | View | Annotate | Download (203.2 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 ffa1c0dc Iustin Pop
import logging
34 74409b12 Iustin Pop
import copy
35 a8083063 Iustin Pop
36 a8083063 Iustin Pop
from ganeti import ssh
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 6048c986 Guido Trotter
from ganeti import locking
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 8d14b30d Iustin Pop
from ganeti import serializer
45 d61df03e Iustin Pop
46 d61df03e Iustin Pop
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  # path (under the hooks directory) of the hooks for this LU; None means
  # no hooks are run for this LU (see BuildHooksEnv)
  HPATH = None
  # hooks type, used together with HPATH (e.g. constants.HTYPE_CLUSTER)
  HTYPE = None
  # opcode attributes that must be present (non-None) for this LU to run
  _OP_REQP = []
  # whether this LU needs to hold the Big Ganeti Lock exclusively
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # by default every level is acquired exclusively (share value 0)
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    # verify that all the opcode parameters this LU declares as required
    # are actually present; missing ones are a caller error
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object, creating it lazily on first access.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separate is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      # calling this helper after instance-level locks were already declared
      # would silently overwrite them, hence the assert
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we really have been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]
325 c4a2fee1 Guido Trotter
326 a8083063 Iustin Pop
327 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Base class for Logical Units which run no hooks.

  Deriving from this class (instead of directly from LogicalUnit)
  saves each hook-less LU from resetting HPATH and HTYPE itself,
  reducing duplicate code.

  """
  # a None HPATH/HTYPE pair means the hooks runner is never invoked
  HPATH = None
  HTYPE = None
336 a8083063 Iustin Pop
337 a8083063 Iustin Pop
338 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpProgrammerError: if the nodes parameter is wrong type

  """
  # guard clauses: callers must pass a non-empty list of names
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  expanded = []
  for node_name in nodes:
    full_name = lu.cfg.ExpandNodeName(node_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % node_name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
365 3312b702 Iustin Pop
366 3312b702 Iustin Pop
367 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  # an empty list means "all instances in the cluster"
  if not instances:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  expanded = []
  for name in instances:
    full_name = lu.cfg.ExpandInstanceName(name)
    if full_name is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
395 dcb93971 Michael Hanselmann
396 dcb93971 Michael Hanselmann
397 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected output fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  # build the set of all known fields, then diff the selection against it
  all_fields = utils.FieldSet()
  all_fields.Extend(static)
  all_fields.Extend(dynamic)

  unknown = all_fields.NonMatching(selected)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown))
414 dcb93971 Michael Hanselmann
415 dcb93971 Michael Hanselmann
416 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
417 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
418 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
419 e4376078 Iustin Pop

420 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
421 e4376078 Iustin Pop

422 e4376078 Iustin Pop
  @type name: string
423 e4376078 Iustin Pop
  @param name: the name of the instance
424 e4376078 Iustin Pop
  @type primary_node: string
425 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
426 e4376078 Iustin Pop
  @type secondary_nodes: list
427 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
428 e4376078 Iustin Pop
  @type os_type: string
429 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
430 e4376078 Iustin Pop
  @type status: string
431 e4376078 Iustin Pop
  @param status: the desired status of the instances
432 e4376078 Iustin Pop
  @type memory: string
433 e4376078 Iustin Pop
  @param memory: the memory size of the instance
434 e4376078 Iustin Pop
  @type vcpus: string
435 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
436 e4376078 Iustin Pop
  @type nics: list
437 e4376078 Iustin Pop
  @param nics: list of tuples (ip, bridge, mac) representing
438 e4376078 Iustin Pop
      the NICs the instance  has
439 e4376078 Iustin Pop
  @rtype: dict
440 e4376078 Iustin Pop
  @return: the hook environment for this instance
441 ecb215b5 Michael Hanselmann

442 396e1b78 Michael Hanselmann
  """
443 396e1b78 Michael Hanselmann
  env = {
444 0e137c28 Iustin Pop
    "OP_TARGET": name,
445 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
446 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
447 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
448 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
449 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
450 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
451 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
452 396e1b78 Michael Hanselmann
  }
453 396e1b78 Michael Hanselmann
454 396e1b78 Michael Hanselmann
  if nics:
455 396e1b78 Michael Hanselmann
    nic_count = len(nics)
456 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
457 396e1b78 Michael Hanselmann
      if ip is None:
458 396e1b78 Michael Hanselmann
        ip = ""
459 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
460 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
461 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
462 396e1b78 Michael Hanselmann
  else:
463 396e1b78 Michael Hanselmann
    nic_count = 0
464 396e1b78 Michael Hanselmann
465 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
466 396e1b78 Michael Hanselmann
467 396e1b78 Michael Hanselmann
  return env
468 396e1b78 Michael Hanselmann
469 396e1b78 Michael Hanselmann
470 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  # fill in cluster-level backend parameter defaults for this instance
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # FIX: this used to pass instance.os (copy/paste of the line above),
    # which made the INSTANCE_STATUS hook variable hold the OS name
    'status': instance.status,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
499 396e1b78 Michael Hanselmann
500 396e1b78 Michael Hanselmann
501 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
502 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
503 bf6929a2 Alexander Schreiber

504 bf6929a2 Alexander Schreiber
  """
505 bf6929a2 Alexander Schreiber
  # check bridges existance
506 bf6929a2 Alexander Schreiber
  brlist = [nic.bridge for nic in instance.nics]
507 72737a7f Iustin Pop
  if not lu.rpc.call_bridges_exist(instance.primary_node, brlist):
508 bf6929a2 Alexander Schreiber
    raise errors.OpPrereqError("one or more target bridges %s does not"
509 bf6929a2 Alexander Schreiber
                               " exist on destination node '%s'" %
510 bf6929a2 Alexander Schreiber
                               (brlist, instance.primary_node))
511 bf6929a2 Alexander Schreiber
512 bf6929a2 Alexander Schreiber
513 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    # the master node itself is the only node allowed to remain
    node_names = self.cfg.GetNodeList()
    if not (len(node_names) == 1 and node_names[0] == master):
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(node_names) - 1))
    instance_names = self.cfg.GetInstanceList()
    if instance_names:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instance_names))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    # demote the master node first; if that fails the cluster stays up
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    # keep backup copies of the cluster ssh keys before they go stale
    private_key, public_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(private_key)
    utils.CreateBackup(public_key)
    return master
549 a8083063 Iustin Pop
550 a8083063 Iustin Pop
551 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  Runs a series of per-node and per-instance consistency checks
  (versions, volume groups, file checksums, connectivity, orphan
  volumes/instances, N+1 memory) and reports problems via feedback_fn.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  # the op must carry the list of optional checks to skip
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    """Acquire shared locks on all nodes and all instances."""
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # all locks are taken in shared mode: verification only reads state
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list::

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type node: string
    @param node: the name of the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @type vglist: dict
    @param vglist: dictionary of volume group names and their size
    @param node_result: the results from the node
    @param remote_version: the RPC version from the remote node
    @param feedback_fn: function used to accumulate results
    @rtype: boolean
    @return: True if any check failed for this node, False otherwise

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # no version at all means the node did not answer the RPC
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    if not node_result:
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # NOTE(review): the loop variable below shadows the 'node' parameter;
        # later uses of 'node' in this method refer to the last peer iterated
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        # this loop also rebinds 'node' (see the shadowing note above)
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    # per-hypervisor verify results: a non-None value is an error string
    hyp_result = node_result.get('hypervisor', None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    @param instance: the instance name
    @param instanceconfig: the instance configuration object
    @param node_vol_is: dict of node name to the volumes actually present
    @param node_instance: dict of node name to the instances running there
    @param feedback_fn: function used to accumulate results
    @rtype: boolean
    @return: True if any check failed, False otherwise

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    # every LV the instance needs must actually exist on its node
    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    # an instance not marked 'down' must be running on its primary node
    if not instanceconfig.status == 'down':
      if (node_current not in node_instance or
          not instance in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    # and must not be running anywhere else
    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    @rtype: boolean
    @return: True if orphan volumes were found, False otherwise

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    @rtype: boolean
    @return: True if unknown instances were found, False otherwise

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    @param node_info: dict of node name to the per-node state collected
        by Exec (memory, instance lists, 'sinst-by-pnode' grouping)
    @param instance_cfg: dict of instance name to instance config object
    @rtype: boolean
    @return: True if some node cannot absorb a failover, False otherwise

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          # only auto-balanced instances count towards the N+1 budget
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    @rtype: boolean
    @return: True if no problem was found, False otherwise

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = []
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors)
    all_vglist = self.rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': hypervisors,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    all_rversion = self.rpc.call_version(nodelist)
    all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
                                        self.cfg.GetHypervisorType())

    cluster = self.cfg.GetClusterInfo()
    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      # a string result is an LVM error report from the node, not data
      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      # NOTE(review): this rebinds 'nodeinfo', which held the list of node
      # objects above; the list is no longer needed at this point
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result
1004 d8fff41c Guido Trotter
1005 a8083063 Iustin Pop
1006 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  Determines which logical volumes backing mirrored instances are
  missing or inactive, and which nodes could not be queried at all.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    """Acquire shared locks on all nodes and all instances."""
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple
    @return: a four-element tuple of (list of unreachable nodes, dict of
        node name to LVM error string, list of instance names with
        offline LVs, dict of instance name to missing (node, volume)
        pairs); the same objects are also bound to the local result
        names below

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # build the map of every (node, volume) pair that should exist to its
    # owning instance; only running, network-mirrored instances matter
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        # a string result is an LVM error report, not volume data; it
        # cannot be iterated like the dict below, so skip this node
        # (previously this fell through and crashed on lvs.iteritems())
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      # every expected LV found online is removed from nv_dict; an
      # offline LV flags its instance as degraded
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1084 2c95a8d4 Iustin Pop
1085 2c95a8d4 Iustin Pop
1086 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  The new name (and the master IP derived from it) is validated in
  CheckPrereq and applied in Exec while the master role is temporarily
  stopped.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    master_node = self.cfg.GetMasterNode()
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    return env, [master_node], [master_node]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostinfo = utils.HostInfo(self.op.name)

    new_name = hostinfo.name
    self.ip = new_ip = hostinfo.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()

    # a rename must change at least one of name/IP
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")

    # a changed IP must not already be live on the network
    if new_ip != old_ip and utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                 " reachable on the network. Aborting." %
                                 new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    new_name = self.op.name
    new_ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      # TODO: sstore
      # NOTE(review): 'ss' is not defined anywhere visible in this module;
      # as written this branch would raise NameError at runtime -- confirm
      # against the pending ssconf rework before relying on it.
      ss.SetKey(ss.SS_MASTER_IP, new_ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, new_name)

      # Distribute updated ss config to all nodes
      master_info = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if master_info.name in dist_nodes:
        dist_nodes.remove(master_info.name)

      logging.debug("Copying updated ssconf data to all nodes")
      for key in (ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP):
        fname = ss.KeyToFilename(key)
        upload_result = self.rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not upload_result[to_node]:
            self.LogWarning("Copy of file %s to node %s failed",
                            fname, to_node)
    finally:
      # always attempt to restore the master role, even after a failure
      if not self.rpc.call_node_start_master(master, False):
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")
1162 07bd8a51 Iustin Pop
1163 07bd8a51 Iustin Pop
1164 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or any of its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  if disk.children:
    # depth-first: any LVM-based descendant makes the whole tree LVM-based
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV
1178 8084f9f6 Manuel Franceschini
1179 8084f9f6 Manuel Franceschini
1180 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    master_node = self.cfg.GetMasterNode()
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    return env, [master_node], [master_node]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if self.op.vg_name is not None and not self.op.vg_name:
      # an empty-but-given vg_name means "disable lvm storage"; this is
      # only acceptable while no lvm-backed instance exists
      for instance in self.cfg.GetAllInstancesInfo().values():
        for disk in instance.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      node_vgs = self.rpc.call_vg_list(node_list)
      for node in node_list:
        vg_error = utils.CheckVolumeGroupSize(node_vgs[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vg_error:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vg_error))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # beparams changes do not need validation (we can't validate?),
    # but we still process here
    if self.op.beparams:
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name in self.new_hvparams:
          self.new_hvparams[hv_name].update(hv_dict)
        else:
          self.new_hvparams[hv_name] = hv_dict

    if self.op.enabled_hypervisors is None:
      self.hv_list = cluster.enabled_hypervisors
    else:
      self.hv_list = self.op.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        params_changed = self.op.hvparams and hv_name in self.op.hvparams
        newly_enabled = (self.op.enabled_hypervisors and
                         hv_name in self.op.enabled_hypervisors)
        if params_changed or newly_enabled:
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      if self.op.vg_name == self.cfg.GetVGName():
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
      else:
        self.cfg.SetVGName(self.op.vg_name)
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    self.cfg.Update(self.cluster)
1288 8084f9f6 Manuel Franceschini
1289 8084f9f6 Manuel Franceschini
1290 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1291 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1292 a8083063 Iustin Pop

1293 a8083063 Iustin Pop
  """
1294 a8083063 Iustin Pop
  if not instance.disks:
1295 a8083063 Iustin Pop
    return True
1296 a8083063 Iustin Pop
1297 a8083063 Iustin Pop
  if not oneshot:
1298 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1299 a8083063 Iustin Pop
1300 a8083063 Iustin Pop
  node = instance.primary_node
1301 a8083063 Iustin Pop
1302 a8083063 Iustin Pop
  for dev in instance.disks:
1303 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
1304 a8083063 Iustin Pop
1305 a8083063 Iustin Pop
  retries = 0
1306 a8083063 Iustin Pop
  while True:
1307 a8083063 Iustin Pop
    max_time = 0
1308 a8083063 Iustin Pop
    done = True
1309 a8083063 Iustin Pop
    cumul_degraded = False
1310 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1311 a8083063 Iustin Pop
    if not rstats:
1312 86d9d3bb Iustin Pop
      lu.LogWarning("Can't get any data from node %s", node)
1313 a8083063 Iustin Pop
      retries += 1
1314 a8083063 Iustin Pop
      if retries >= 10:
1315 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1316 3ecf6786 Iustin Pop
                                 " aborting." % node)
1317 a8083063 Iustin Pop
      time.sleep(6)
1318 a8083063 Iustin Pop
      continue
1319 a8083063 Iustin Pop
    retries = 0
1320 a8083063 Iustin Pop
    for i in range(len(rstats)):
1321 a8083063 Iustin Pop
      mstat = rstats[i]
1322 a8083063 Iustin Pop
      if mstat is None:
1323 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
1324 86d9d3bb Iustin Pop
                           node, instance.disks[i].iv_name)
1325 a8083063 Iustin Pop
        continue
1326 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1327 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1328 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1329 a8083063 Iustin Pop
      if perc_done is not None:
1330 a8083063 Iustin Pop
        done = False
1331 a8083063 Iustin Pop
        if est_time is not None:
1332 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1333 a8083063 Iustin Pop
          max_time = est_time
1334 a8083063 Iustin Pop
        else:
1335 a8083063 Iustin Pop
          rem_time = "no time estimate"
1336 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1337 b9bddb6b Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
1338 a8083063 Iustin Pop
    if done or oneshot:
1339 a8083063 Iustin Pop
      break
1340 a8083063 Iustin Pop
1341 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1342 a8083063 Iustin Pop
1343 a8083063 Iustin Pop
  if done:
1344 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1345 a8083063 Iustin Pop
  return not cumul_degraded
1346 a8083063 Iustin Pop
1347 a8083063 Iustin Pop
1348 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1349 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1350 a8083063 Iustin Pop

1351 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1352 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1353 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1354 0834c866 Iustin Pop

1355 a8083063 Iustin Pop
  """
1356 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1357 0834c866 Iustin Pop
  if ldisk:
1358 0834c866 Iustin Pop
    idx = 6
1359 0834c866 Iustin Pop
  else:
1360 0834c866 Iustin Pop
    idx = 5
1361 a8083063 Iustin Pop
1362 a8083063 Iustin Pop
  result = True
1363 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1364 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1365 a8083063 Iustin Pop
    if not rstats:
1366 9a4f63d1 Iustin Pop
      logging.warning("Node %s: disk degraded, not found or node down", node)
1367 a8083063 Iustin Pop
      result = False
1368 a8083063 Iustin Pop
    else:
1369 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1370 a8083063 Iustin Pop
  if dev.children:
1371 a8083063 Iustin Pop
    for child in dev.children:
1372 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1373 a8083063 Iustin Pop
1374 a8083063 Iustin Pop
  return result
1375 a8083063 Iustin Pop
1376 a8083063 Iustin Pop
1377 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      if not nr:
        # skip nodes that returned no data (failed or empty)
        continue
      for os_obj in nr:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = self.rpc.call_os_diagnose(node_list)
    # identity test instead of the previous "== False" equality check;
    # the RPC layer signals total failure with the False singleton
    if node_data is False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # valid only if the first (status) entry is true on every node
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1461 a8083063 Iustin Pop
1462 a8083063 Iustin Pop
1463 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # call-style raise for consistency with the rest of the module
      # (was the deprecated "raise Class, arg" statement form)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)
1530 c8a0948f Michael Hanselmann
1531 a8083063 Iustin Pop
1532 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal",
    )

  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      node_names = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      node_names = self.wanted
      missing = set(node_names).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      node_names = all_info.keys()

    node_names = utils.NiceSort(node_names)
    node_objs = [all_info[name] for name in node_names]

    # begin data gathering

    if self.do_locking:
      live_data = {}
      rpc_data = self.rpc.call_node_info(node_names, self.cfg.GetVGName(),
                                         self.cfg.GetHypervisorType())
      for name in node_names:
        nodeinfo = rpc_data.get(name, None)
        if not nodeinfo:
          live_data[name] = {}
          continue
        live_data[name] = {
          "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
          "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
          "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
          "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
          "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
          "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
          "bootid": nodeinfo['bootid'],
          }
    else:
      live_data = dict.fromkeys(node_names, {})

    node_to_primary = dict([(name, set()) for name in node_names])
    node_to_secondary = dict([(name, set()) for name in node_names])

    # only walk the instance list when an instance-related field is wanted
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      for instance_name in self.cfg.GetInstanceList():
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in node_objs:
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1669 a8083063 Iustin Pop
1670 a8083063 Iustin Pop
1671 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  # opcode parameters that must be present
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  # fields that must be queried live from the nodes via RPC
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  # fields answerable from local data alone
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    """Validate the requested fields and compute the locks needed.

    If no node names were given, all node locks are acquired (shared).

    """
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # read-only operation: shared node locks are sufficient
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # the node list to query is exactly the set of node locks we hold
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    # ask every selected node for its logical volumes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    # map instance -> {node: [lv names]}, used to resolve the
    # "instance" output field below
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      # skip nodes that failed to answer or reported no volumes
      if node not in volumes or not volumes[node]:
        continue

      # sort a copy by physical device name for stable output
      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance (if any) owning this logical volume
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              # no instance maps this LV on this node
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
1750 dcb93971 Michael Hanselmann
1751 dcb93971 Michael Hanselmann
1752 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  # only node_name is mandatory; secondary_ip and readd are optional
  # opcode attributes accessed below
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name via DNS; raises if the host is not resolvable
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    # secondary_ip is optional; default to the primary (single-homed node)
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    # readd mode requires the node to exist; add mode requires it not to
    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # on readd, the node's own previous entry is fine as long as
        # the IP configuration has not changed
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      # the new node's IPs must not clash with any other node's IPs
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # node object to be added to the configuration in Exec()
    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host DSA/RSA key pairs plus the cluster user's key pair
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      # dual-homed node: verify it actually owns the secondary IP
      if not self.rpc.call_node_has_ip_address(new_node.name,
                                               new_node.secondary_ip):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # ask the master node to verify ssh/hostname setup towards the new node
    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if not result[verifier]:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier]['nodelist']:
        for failed in result[verifier]['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier]['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      # on readd the node is already in the list
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      # the master already has the up-to-date files
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # best-effort distribution: log but do not abort
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
      # HVM clusters also need the VNC password file on the new node
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if not result[node]:
        logging.error("Could not copy file %s to node %s", fname, node)

    # finally register the node with the cluster context/configuration
    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
1951 a8083063 Iustin Pop
1952 a8083063 Iustin Pop
1953 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # purely read-only query: no locks are required
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """

  def Exec(self, feedback_fn):
    """Return a dictionary describing the cluster configuration.

    """
    cluster = self.cfg.GetClusterInfo()
    arch = (platform.architecture()[0], platform.machine())
    return {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": arch,
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": cluster.hvparams,
      "beparams": cluster.beparams,
      }
1990 a8083063 Iustin Pop
1991 a8083063 Iustin Pop
1992 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  # no live fields; everything is answered from the master's local state
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    # read-only query, no locks needed
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """

  def Exec(self, feedback_fn):
    """Collect the requested configuration values.

    """
    # map each supported field to a zero-argument getter; the drain
    # flag is the existence of the queue-drain marker file
    getters = {
      "cluster_name": self.cfg.GetClusterName,
      "master_node": self.cfg.GetMasterNode,
      "drain_flag": lambda: os.path.exists(constants.JOB_QUEUE_DRAIN_FILE),
      }
    values = []
    for field in self.op.output_fields:
      if field not in getters:
        raise errors.ParameterError(field)
      values.append(getters[field]())
    return values
2030 a8083063 Iustin Pop
2031 a8083063 Iustin Pop
2032 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed in DeclareLocks, once the instance lock
    # is held and its node list is known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check that the instance is known to the configuration.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Assemble the instance's block devices on all its nodes.

    """
    ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if not ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
2067 a8083063 Iustin Pop
2068 a8083063 Iustin Pop
2069 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: a tuple (disks_ok, device_info); disks_ok is False if the
      operation failed, and device_info is a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1)",
                           inst_disk.iv_name, node)
        # pass-1 failures only count as fatal when secondaries matter
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2)",
                           inst_disk.iv_name, node)
        disks_ok = False
    # NOTE: 'result' here is the last assemble call's value, i.e. the
    # primary node's result for this disk
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2133 a8083063 Iustin Pop
2134 a8083063 Iustin Pop
2135 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Assemble an instance's disks, aborting (and cleaning up) on failure.

  On failure the already-assembled devices are shut down again and an
  OpExecError is raised; 'force' also controls whether secondary-node
  errors are ignored by the assembly step.

  """
  assembled, _ = _AssembleInstanceDisks(lu, instance,
                                        ignore_secondaries=force)
  if assembled:
    return
  # roll back whatever was brought up before failing
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.proc.LogWarning("", hint="If the message above refers to a"
                       " secondary node,"
                       " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2148 fe7b0351 Michael Hanselmann
2149 fe7b0351 Michael Hanselmann
2150 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are filled in by DeclareLocks after the instance lock
    # is acquired
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check that the instance is known to the configuration.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Tear down the instance's block devices (refusing if it runs).

    """
    _SafeShutdownInstanceDisks(self, self.instance)
2182 a8083063 Iustin Pop
2183 a8083063 Iustin Pop
2184 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we shut down
  @raise errors.OpExecError: if the primary node cannot be contacted
      or if the instance is still running

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                    [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  # a non-list answer means the RPC to the primary node failed
  if not isinstance(ins_l, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2203 a8083063 Iustin Pop
2204 a8083063 Iustin Pop
2205 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored:
  a failed shutdown there does not turn the result to failure.  Errors
  on any other node always make the result False.

  @type lu: C{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param instance: the instance whose disks we should shut down
  @type ignore_primary: C{bool}
  @param ignore_primary: whether shutdown failures on the primary node
      should be ignored when computing the result
  @rtype: C{bool}
  @return: True if every (non-ignored) blockdev shutdown succeeded

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      # point the disk's physical ID at the node we are about to contact
      lu.cfg.SetDiskID(top_disk, node)
      if not lu.rpc.call_blockdev_shutdown(node, top_disk):
        logging.error("Could not shutdown block device %s on node %s",
                      disk.iv_name, node)
        # only primary-node failures can be ignored, and only on request
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
2224 a8083063 Iustin Pop
2225 a8083063 Iustin Pop
2226 b9bddb6b Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
2227 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2228 d4f16fd9 Iustin Pop

2229 d4f16fd9 Iustin Pop
  This function check if a given node has the needed amount of free
2230 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2231 d4f16fd9 Iustin Pop
  information from the node, this function raise an OpPrereqError
2232 d4f16fd9 Iustin Pop
  exception.
2233 d4f16fd9 Iustin Pop

2234 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
2235 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2236 e69d05fd Iustin Pop
  @type node: C{str}
2237 e69d05fd Iustin Pop
  @param node: the node to check
2238 e69d05fd Iustin Pop
  @type reason: C{str}
2239 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2240 e69d05fd Iustin Pop
  @type requested: C{int}
2241 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2242 e69d05fd Iustin Pop
  @type hypervisor: C{str}
2243 e69d05fd Iustin Pop
  @param hypervisor: the hypervisor to ask for memory stats
2244 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2245 e69d05fd Iustin Pop
      we cannot check the node
2246 d4f16fd9 Iustin Pop

2247 d4f16fd9 Iustin Pop
  """
2248 72737a7f Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
2249 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2250 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2251 d4f16fd9 Iustin Pop
                             " information" % (node,))
2252 d4f16fd9 Iustin Pop
2253 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2254 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2255 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2256 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2257 d4f16fd9 Iustin Pop
  if requested > free_mem:
2258 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2259 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2260 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2261 d4f16fd9 Iustin Pop
2262 d4f16fd9 Iustin Pop
2263 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"FORCE": self.op.force}
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    node_list = [self.cfg.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, its bridges exist
    and its primary node has enough free memory for it.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.instance = instance
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existance
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    inst = self.instance
    extra_args = getattr(self.op, "extra_args", "")

    self.cfg.MarkInstanceUp(inst.name)

    pnode = inst.primary_node

    _StartInstanceDisks(self, inst, self.op.force)

    # if the hypervisor refuses to start it, roll the disks back down
    if not self.rpc.call_instance_start(pnode, inst, extra_args):
      _ShutdownInstanceDisks(self, inst)
      raise errors.OpExecError("Could not start instance")
2324 a8083063 Iustin Pop
2325 a8083063 Iustin Pop
2326 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    # validate the reboot type before taking any locks, so that a bad
    # request fails as early as possible
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridges existance
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboots are performed in one RPC by the primary node
      if not self.rpc.call_instance_reboot(node_current, instance,
                                           reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: shut the instance and its disks down, then bring
      # everything back up; the order of the four calls matters
      if not self.rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      if not self.rpc.call_instance_start(node_current, instance, extra_args):
        # starting failed, roll the disks back down before aborting
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2398 bf6929a2 Alexander Schreiber
2399 bf6929a2 Alexander Schreiber
2400 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    node_list = [self.cfg.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.instance = instance
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    inst = self.instance
    # record the new state in the configuration before contacting the node
    self.cfg.MarkInstanceDown(inst.name)
    # a failed hypervisor shutdown is only a warning; we still try to
    # deactivate the disks
    if not self.rpc.call_instance_shutdown(inst.primary_node, inst):
      self.proc.LogWarning("Could not shutdown instance")

    _ShutdownInstanceDisks(self, inst)
2444 a8083063 Iustin Pop
2445 a8083063 Iustin Pop
2446 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the primary node that the instance is really down
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: the original formatted self.op.pnode here, an attribute
        # the reinstall opcode never carries (copy-paste from the create
        # LU), which would raise AttributeError instead of OpPrereqError
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not self.rpc.call_instance_os_add(inst.primary_node, inst):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks, even if the OS install failed
      _ShutdownInstanceDisks(self, inst)
2528 fe7b0351 Michael Hanselmann
2529 fe7b0351 Michael Hanselmann
2530 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  # NOTE(review): no ExpandNames/REQ_BGL here, so this LU still runs
  # under the big ganeti lock (the Exec comment below relies on that)
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves, is not already used and (unless
    ignore_ip is set) its IP is not already alive.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the primary node that the instance is really down
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification; HostInfo resolves the name and gives us
    # both the canonical name and the IP
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    # remember the old disk directory before the config rename changes it
    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)

      if not result:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      # a failed OS rename script is only a warning, since the config
      # rename above has already happened
      if not self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                               old_name):
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
2635 decd5f45 Iustin Pop
2636 decd5f45 Iustin Pop
2637 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    node_list = [self.cfg.GetMasterNode()]
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.instance = instance
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    inst = self.instance
    logging.info("Shutting down instance %s on node %s",
                 inst.name, inst.primary_node)

    if not self.rpc.call_instance_shutdown(inst.primary_node, inst):
      # failures are fatal unless the caller asked to ignore them
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (inst.name, inst.primary_node))
      feedback_fn("Warning: can't shutdown instance")

    logging.info("Removing block devices for instance %s", inst.name)

    if not _RemoveDisks(self, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", inst.name)

    self.cfg.RemoveInstance(inst.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = inst.name
2702 a8083063 Iustin Pop
2703 a8083063 Iustin Pop
2704 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  For each requested instance, returns the values of the requested
  output fields.  Static fields are answered from the configuration
  alone; dynamic fields (oper_state, oper_ram, status) require a live
  RPC to the primary nodes and therefore require locking.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  # Fields answerable from the configuration; the parenthesized entries
  # are parameterized field patterns matched via Matches() in Exec().
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state", "admin_ram",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    "(disk).(size)/([0-9]+)",
                                    "(disk).(sizes)",
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
                                    "(nic).(macs|ips|bridges)",
                                    "(disk|nic).(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  # Fields that need live data from the nodes.
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")

  def ExpandNames(self):
    """Validate the field list and compute the needed locks.

    Locks are only acquired (self.do_locking) when at least one
    requested field is not in the static set.

    """
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # Queries are read-only, so shared locks suffice.
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # Any field outside the static set means we need live data and
    # therefore locking.
    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the node locks of the selected instances, if locking."""
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    All validation already happened in ExpandNames, so nothing to do.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    @return: a list (one entry per selected instance) of lists of field
        values, in the order of self.op.output_fields

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.do_locking:
      # The lock acquisition already filtered/validated the names.
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
    elif self.wanted != locking.ALL_SET:
      # No locks held: the instances may have disappeared since
      # ExpandNames, so re-check against the current configuration.
      instance_names = self.wanted
      missing = set(instance_names).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some instances were removed before retrieving their data: %s"
          % missing)
    else:
      instance_names = all_info.keys()

    instance_names = utils.NiceSort(instance_names)
    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    # Nodes that failed to answer the live-data RPC.
    bad_nodes = []
    if self.do_locking:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # NOTE(review): only an exact False marks the node bad; other
          # falsy answers (e.g. {}) mean "no instance alive" there.
          bad_nodes.append(name)
        # else no instance is alive
    else:
      # Without locking there is no live data; pretend every instance
      # reported an empty info dict.
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      # Hypervisor/backend parameters with cluster defaults filled in.
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = (instance.status != "down")
        elif field == "oper_state":
          # None means "unknown" (primary node unreachable).
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # Combined admin + operational state.
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # Legacy names: sda/sdb map to disk indices 0/1.
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2923 a8083063 Iustin Pop
2924 a8083063 Iustin Pop
2925 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  Shuts the instance down on its primary node and brings it up on its
  (single) secondary node.  Only works for network-mirrored disk
  templates.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    """Lock the instance; node locks are computed later from it."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a mirrored
    disk template, and that the target (secondary) node has enough
    memory and all required bridges.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # Backend params with cluster defaults filled in (for the memory check).
    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    if not self.rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # Degraded disks only block failover of a running instance,
        # and only when consistency wasn't explicitly waived.
        if instance.status == "up" and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    if not self.rpc.call_instance_shutdown(source_node, instance):
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding"
                             " anyway. Please make sure node %s is down",
                             instance.name, source_node, source_node)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # Flip primary/secondary in the configuration before (re)starting.
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status == "up":
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      if not self.rpc.call_instance_start(target_node, instance, None):
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s." %
                                 (instance.name, target_node))
3048 a8083063 Iustin Pop
3049 a8083063 Iustin Pop
3050 b9bddb6b Iustin Pop
def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
3051 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
3052 a8083063 Iustin Pop

3053 a8083063 Iustin Pop
  This always creates all devices.
3054 a8083063 Iustin Pop

3055 a8083063 Iustin Pop
  """
3056 a8083063 Iustin Pop
  if device.children:
3057 a8083063 Iustin Pop
    for child in device.children:
3058 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
3059 a8083063 Iustin Pop
        return False
3060 a8083063 Iustin Pop
3061 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3062 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3063 72737a7f Iustin Pop
                                       instance.name, True, info)
3064 a8083063 Iustin Pop
  if not new_id:
3065 a8083063 Iustin Pop
    return False
3066 a8083063 Iustin Pop
  if device.physical_id is None:
3067 a8083063 Iustin Pop
    device.physical_id = new_id
3068 a8083063 Iustin Pop
  return True
3069 a8083063 Iustin Pop
3070 a8083063 Iustin Pop
3071 b9bddb6b Iustin Pop
def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
3072 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
3073 a8083063 Iustin Pop

3074 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
3075 a8083063 Iustin Pop
  all its children.
3076 a8083063 Iustin Pop

3077 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
3078 a8083063 Iustin Pop

3079 a8083063 Iustin Pop
  """
3080 a8083063 Iustin Pop
  if device.CreateOnSecondary():
3081 a8083063 Iustin Pop
    force = True
3082 a8083063 Iustin Pop
  if device.children:
3083 a8083063 Iustin Pop
    for child in device.children:
3084 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(lu, node, instance,
3085 3f78eef2 Iustin Pop
                                        child, force, info):
3086 a8083063 Iustin Pop
        return False
3087 a8083063 Iustin Pop
3088 a8083063 Iustin Pop
  if not force:
3089 a8083063 Iustin Pop
    return True
3090 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3091 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3092 72737a7f Iustin Pop
                                       instance.name, False, info)
3093 a8083063 Iustin Pop
  if not new_id:
3094 a8083063 Iustin Pop
    return False
3095 a8083063 Iustin Pop
  if device.physical_id is None:
3096 a8083063 Iustin Pop
    device.physical_id = new_id
3097 a8083063 Iustin Pop
  return True
3098 a8083063 Iustin Pop
3099 a8083063 Iustin Pop
3100 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
3101 923b1523 Iustin Pop
  """Generate a suitable LV name.
3102 923b1523 Iustin Pop

3103 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
3104 923b1523 Iustin Pop

3105 923b1523 Iustin Pop
  """
3106 923b1523 Iustin Pop
  results = []
3107 923b1523 Iustin Pop
  for val in exts:
3108 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
3109 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
3110 923b1523 Iustin Pop
  return results
3111 923b1523 Iustin Pop
3112 923b1523 Iustin Pop
3113 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Build a DRBD8 disk object together with its two backing LVs.

  The data LV has the requested size; the metadata LV has a fixed size
  of 128.  Network port and shared secret are allocated/generated via
  the cluster configuration.

  """
  # Cluster-wide resources are reserved through the config object so
  # concurrent allocations cannot clash.
  drbd_port = lu.cfg.AllocatePort()
  vg_name = lu.cfg.GetVGName()
  secret = lu.cfg.GenerateDRBDSecret()

  lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vg_name, names[0]))
  lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vg_name, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, drbd_port,
                                  p_minor, s_minor, secret),
                      children=[lv_data, lv_meta],
                      iv_name=iv_name)
3132 a1f445d3 Iustin Pop
3133 7c0d6283 Michael Hanselmann
3134 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  @param lu: the logical unit on whose behalf we execute
  @param template_name: one of the constants.DT_* disk templates
  @param instance_name: name of the owning instance
  @param primary_node: the instance's primary node
  @param secondary_nodes: list of secondary nodes (must be empty for
      plain/file, exactly one for drbd8)
  @param disk_info: list of dicts with at least a "size" key
  @param file_storage_dir: base directory for file-based disks
  @param file_driver: driver for file-based disks
  @param base_index: offset added to each disk index when building the
      "disk/N" iv_name (used when adding disks to an existing instance)

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index)
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # Each disk needs one minor on each of the two nodes.
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * disk_count, instance_name)

    names = _GenerateUniqueNames(lu,
                                 [".disk%d_%s" % (i, s)
                                  for i in range(disk_count)
                                  for s in ("data", "meta")
                                  ])
    for idx, disk in enumerate(disk_info):
      # FIX: disk_index was only computed in the DT_PLAIN branch,
      # causing a NameError here; compute it per-branch.
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      # FIX: same NameError as in the drbd8 branch above.
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         idx)))
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
3194 a8083063 Iustin Pop
3195 a8083063 Iustin Pop
3196 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3197 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3198 3ecf6786 Iustin Pop

3199 3ecf6786 Iustin Pop
  """
3200 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3201 a0c3fea1 Michael Hanselmann
3202 a0c3fea1 Michael Hanselmann
3203 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create every disk of the given instance.

  Helper for AddInstance: first makes sure the file storage directory
  exists (file-based templates only), then instantiates each block
  device on the secondary node(s) and finally on the primary node.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @rtype: boolean
  @return: the success of the creation

  """
  meta_info = _GetInstanceInfoText(instance)

  if instance.disk_template == constants.DT_FILE:
    # all file disks live under one directory; derive it from disk 0
    storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(instance.primary_node,
                                                 storage_dir)
    if not result:
      logging.error("Could not connect to node '%s'", instance.primary_node)
      return False
    if not result[0]:
      logging.error("Failed to create directory '%s'", storage_dir)
      return False

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    # secondaries first, so the primary can attach to them afterwards
    for snode in instance.secondary_nodes:
      created = _CreateBlockDevOnSecondary(lu, snode, instance,
                                           device, False, meta_info)
      if not created:
        logging.error("Failed to create volume %s (%s) on secondary node %s!",
                      device.iv_name, device, snode)
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
                                    instance, device, meta_info):
      logging.error("Failed to create volume %s on primary!", device.iv_name)
      return False

  return True
3250 a8083063 Iustin Pop
3251 a8083063 Iustin Pop
3252 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  Best-effort counterpart of L{_CreateDisks}, used by `AddInstance()`
  and `RemoveInstance()`: every device is attempted even when an
  earlier removal fails, and the return value reflects whether all of
  them succeeded.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_removed = True
  for device in instance.disks:
    # walk the full device tree so stacked devices are removed everywhere
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      removed = lu.rpc.call_blockdev_remove(node, disk)
      if not removed:
        lu.proc.LogWarning("Could not remove block device %s on node %s,"
                           " continuing anyway", device.iv_name, node)
        all_removed = False

  if instance.disk_template == constants.DT_FILE:
    storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if not lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                               storage_dir):
      logging.error("Could not remove directory '%s'", storage_dir)
      all_removed = False

  return all_removed
3287 a8083063 Iustin Pop
3288 a8083063 Iustin Pop
3289 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
  """Compute the free space required in the volume group.

  @param disk_template: the disk template of the instance
  @param disks: list of disk definition dicts (each with a "size" key,
      in mebibytes)
  @return: the required space in MiB, or None for templates that do not
      consume volume-group space

  """
  # templates without LVM backing map to None
  requirements = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    # each drbd disk carries an extra 128 MB of metadata
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
  }

  try:
    return requirements[disk_template]
  except KeyError:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)
3307 e2fe6369 Iustin Pop
3308 e2fe6369 Iustin Pop
3309 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Validate hypervisor parameters on a set of nodes.

  Shared helper between instance creation and instance modification:
  asks every node in C{nodenames} to validate C{hvparams} for the
  hypervisor C{hvname}, raising on the first failure.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  results = lu.rpc.call_hypervisor_validate_params(nodenames, hvname,
                                                   hvparams)
  for node in nodenames:
    node_result = results.get(node, None)
    # a missing or malformed answer means the node could not be queried
    well_formed = isinstance(node_result, (tuple, list))
    if not (node_result and well_formed):
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s' (%s)" % (node, node_result))
    if not node_result[0]:
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
                                 " %s" % node_result[1])
3337 74409b12 Iustin Pop
3338 74409b12 Iustin Pop
3339 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
3340 a8083063 Iustin Pop
  """Create an instance.
3341 a8083063 Iustin Pop

3342 a8083063 Iustin Pop
  """
3343 a8083063 Iustin Pop
  HPATH = "instance-add"
3344 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3345 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
3346 08db7c5c Iustin Pop
              "mode", "start",
3347 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
3348 338e51e8 Iustin Pop
              "hvparams", "beparams"]
3349 7baf741d Guido Trotter
  REQ_BGL = False
3350 7baf741d Guido Trotter
3351 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Resolve a (possibly shortened) node name, failing if unknown.

    @param node: the node name to expand
    @return: the fully expanded node name
    @raise errors.OpPrereqError: if the node is not known to the cluster

    """
    expanded = self.cfg.ExpandNodeName(node)
    if expanded is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return expanded
3359 7baf741d Guido Trotter
3360 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.  Also performs
    all the checks that need no node RPC: opcode constant validity,
    hypervisor parameter syntax, instance name resolution, NIC and disk
    specification pre-building, and the node/iallocator lock set.

    @raise errors.OpPrereqError: if any of the static checks fail

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # default to the cluster-wide hypervisor when none was requested
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)

    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)

    # fill and remember the beparams dict (kept for CheckPrereq/hooks)
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification (resolves the name to a FQDN + IP)
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks; "none"/missing means no IP, "auto" means the
      # IP resolved from the instance name
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification (auto/generate are filled in later, in Exec)
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
      # bridge verification
      bridge = nic.get("bridge", self.cfg.GetDefBridge())
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    # NOTE(review): file_driver/file_storage_dir are read directly but,
    # unlike pnode/snode/iallocator above, are never defaulted to None --
    # presumably the opcode always defines them; confirm against opcodes.py
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    # NOTE(review): an *absolute* path is rejected here -- presumably the
    # directory is meant to be relative to the cluster file storage dir;
    # the error wording looks inverted, confirm before changing
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator may pick any node, so we must lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      self.op.src_node = src_node = self._ExpandNode(src_node)
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        self.needed_locks[locking.LEVEL_NODE].append(src_node)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
3507 a8083063 Iustin Pop
3508 538475ca Iustin Pop
  def _RunAllocator(self):
    """Compute the instance's nodes via the requested iallocator plugin.

    On success this fills C{self.op.pnode} (and C{self.op.snode} when
    two nodes are required) from the allocator's answer.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    request = IAllocator(self,
                         mode=constants.IALLOCATOR_MODE_ALLOC,
                         name=self.op.instance_name,
                         disk_template=self.op.disk_template,
                         tags=[],
                         os=self.op.os_type,
                         vcpus=self.be_full[constants.BE_VCPUS],
                         mem_size=self.be_full[constants.BE_MEMORY],
                         disks=self.disks,
                         nics=[n.ToDict() for n in self.nics],
                         hypervisor=self.op.hypervisor,
                         )
    request.Run(self.op.iallocator)

    if not request.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           request.info))
    if len(request.nodes) != request.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(request.nodes),
                                  request.required_nodes))

    self.op.pnode = request.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(request.nodes))
    if request.required_nodes == 2:
      self.op.snode = request.nodes[1]
3543 538475ca Iustin Pop
3544 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build the hooks environment for instance creation.

    The hooks run on the master and on the instance's primary and
    secondary nodes.

    """
    env = {}
    env["INSTANCE_DISK_TEMPLATE"] = self.op.disk_template
    env["INSTANCE_DISK_SIZE"] = ",".join(str(d["size"]) for d in self.disks)
    env["INSTANCE_ADD_MODE"] = self.op.mode
    if self.op.mode == constants.INSTANCE_IMPORT:
      # import mode also exposes where the source dump lives
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGES"] = self.src_images

    nic_specs = [(n.ip, n.bridge, n.mac) for n in self.nics]
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
                                     primary_node=self.op.pnode,
                                     secondary_nodes=self.secondaries,
                                     status=self.instance_status,
                                     os_type=self.op.os_type,
                                     memory=self.be_full[constants.BE_MEMORY],
                                     vcpus=self.be_full[constants.BE_VCPUS],
                                     nics=nic_specs,
                                     ))

    node_list = ([self.cfg.GetMasterNode(), self.op.pnode] +
                 self.secondaries)
    return env, node_list, node_list
3573 a8083063 Iustin Pop
3574 a8083063 Iustin Pop
3575 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Validates everything that needs cluster state or node RPC: LVM
    availability, import source (when importing), IP conflicts, node
    selection (directly or via the iallocator), free disk space and
    memory, hypervisor parameters, OS availability and bridges.

    @raise errors.OpPrereqError: if any prerequisite is not met

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      export_info = self.rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        # FIX: report the actual instance disk count instead of the
        # hardcoded literal 2
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          # mark disks missing from the export so Exec can skip them
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          # FIX: export NIC options are indexed 0..exp_nic_count-1, so the
          # old ">= idx" test was off by one and could query a nonexistent
          # 'nic%d_mac' option (ConfigParser would raise NoOptionError)
          if nic.mac == constants.VALUE_AUTO and exp_nic_count > idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements (req_size is None for non-LVM templates)
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo.get(node, None)
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > vg_free:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, vg_free, req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    if not self.rpc.call_bridges_exist(self.pnode.name, bridges):
      raise errors.OpPrereqError("one of the target bridges '%s' does not"
                                 " exist on"
                                 " destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

    # remembered for BuildHooksEnv and Exec
    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'
3716 a8083063 Iustin Pop
3717 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Generates MAC addresses and a network port where needed, creates
    the disks and the cluster configuration entry, optionally waits for
    the disks to sync, runs the OS create/import scripts and finally
    starts the instance if requested.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # generate real MAC addresses for NICs that requested one
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    # some hypervisor types need a dedicated network port allocated
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))

    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self, iobj):
      # roll back any partially-created devices before aborting
      _RemoveDisks(self, iobj)
      self.cfg.ReleaseDRBDMinors(instance)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Remove the temp. assignments for the instance's drbds
    self.cfg.ReleaseDRBDMinors(instance)
    # Unlock all the nodes
    self.context.glm.release(locking.LEVEL_NODE)
    del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not self.rpc.call_instance_os_add(pnode_name, iobj):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        for idx, result in enumerate(import_result):
          if not result:
            # FIX: the previous message read "Could not image %s for on
            # instance %s", which was garbled
            self.LogWarning("Could not import image %s on instance %s,"
                            " disk %d, on node %s" %
                            (src_images[idx], instance, idx, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      if not self.rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3839 a8083063 Iustin Pop
3840 a8083063 Iustin Pop
3841 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Return the command line for connecting to an instance's console.

  This LU is somewhat special: instead of changing cluster state, its
  Exec() computes and returns the ssh command line that must be run on
  the master node in order to reach the instance's console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # ask the primary node which instances it is currently running
    running = self.rpc.call_instance_list([pnode], [inst.hypervisor])[pnode]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)

    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logging.debug("Connecting to console of %s on %s", inst.name, pnode)

    # ask the hypervisor for its console command, then wrap it in ssh
    hv = hypervisor.GetHypervisor(inst.hypervisor)
    shell_cmd = hv.GetShellCommandForConsole(inst)

    return self.ssh.BuildCmd(pnode, "root", shell_cmd, batch=True, tty=True)
3887 a8083063 Iustin Pop
3888 a8083063 Iustin Pop
3889 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3890 a8083063 Iustin Pop
  """Replace the disks of an instance.
3891 a8083063 Iustin Pop

3892 a8083063 Iustin Pop
  """
3893 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3894 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3895 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3896 efd990e4 Guido Trotter
  REQ_BGL = False
3897 efd990e4 Guido Trotter
3898 efd990e4 Guido Trotter
  def ExpandNames(self):
    """Expand names and decide which node locks are needed.

    Three cases: an iallocator will pick the new secondary (lock all
    nodes), an explicit new secondary was given (lock just that node,
    appending the instance's own nodes later), or no new secondary
    (lock only the instance's nodes).

    """
    self._ExpandAndLockInstance()

    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None

    use_iallocator = getattr(self.op, "iallocator", None) is not None
    if use_iallocator:
      # the allocator chooses the target node, so we cannot know it yet
      if self.op.remote_node is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      # explicit new secondary: canonicalize its name and lock it
      expanded = self.cfg.ExpandNodeName(self.op.remote_node)
      if expanded is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = expanded
      self.needed_locks[locking.LEVEL_NODE] = [expanded]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      # no new secondary: only the instance's own nodes are involved
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3921 efd990e4 Guido Trotter
3922 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Declare the instance's nodes at the node locking level.

    """
    if level != locking.LEVEL_NODE:
      return
    # If we're not already locking all nodes in the set we have to declare
    # the instance's primary/secondary nodes.
    if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
      self._LockInstancesNodes()
3928 a8083063 Iustin Pop
3929 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    Runs the configured iallocator in relocation mode and stores the
    selected node in self.op.remote_node; raises OpPrereqError if the
    allocator fails or returns an unexpected number of nodes.

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # FIX: the format string has three placeholders but only two
      # values were supplied before, so raising this error crashed with
      # a TypeError; pass the iallocator name as the first argument
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)
3951 b6e82a65 Iustin Pop
3952 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.instance
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, instance))
    # hooks run on the master and the primary node; the new secondary
    # is included only when one was explicitly requested
    nodes = [self.cfg.GetMasterNode(), instance.primary_node]
    if self.op.remote_node is not None:
      nodes.append(self.op.remote_node)
    return env, nodes, nodes
3971 a8083063 Iustin Pop
3972 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    layout supports replacement, and normalizes the replacement mode
    (possibly running the iallocator to pick the new secondary).

    """
    # the instance was locked in ExpandNames, so it must still exist
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # disk replacement only makes sense for network-mirrored templates
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    # the code below assumes exactly one secondary node
    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # if an iallocator was given, let it choose self.op.remote_node
    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      # with drbd8, replacing "all" disks in place is not supported:
      # only primary-side or secondary-side replacement
      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # primary-side replacement: work on the primary, peer is the
        # (unchanged) secondary
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # an empty disk list means "replace all disks"
    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    # validate each requested disk index (FindDisk raises on bad ones)
    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)
4044 a8083063 Iustin Pop
4045 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
4046 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
4047 a9e0c397 Iustin Pop

4048 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4049 e4376078 Iustin Pop

4050 e4376078 Iustin Pop
      1. for each disk to be replaced:
4051 e4376078 Iustin Pop

4052 e4376078 Iustin Pop
        1. create new LVs on the target node with unique names
4053 e4376078 Iustin Pop
        1. detach old LVs from the drbd device
4054 e4376078 Iustin Pop
        1. rename old LVs to name_replaced.<time_t>
4055 e4376078 Iustin Pop
        1. rename new LVs to old LVs
4056 e4376078 Iustin Pop
        1. attach the new LVs (with the old names now) to the drbd device
4057 e4376078 Iustin Pop

4058 e4376078 Iustin Pop
      1. wait for sync across all devices
4059 e4376078 Iustin Pop

4060 e4376078 Iustin Pop
      1. for each modified disk:
4061 e4376078 Iustin Pop

4062 e4376078 Iustin Pop
        1. remove old LVs (which have the name name_replaces.<time_t>)
4063 a9e0c397 Iustin Pop

4064 a9e0c397 Iustin Pop
    Failures are not very well handled.
4065 cff90b79 Iustin Pop

4066 a9e0c397 Iustin Pop
    """
4067 cff90b79 Iustin Pop
    steps_total = 6
4068 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4069 a9e0c397 Iustin Pop
    instance = self.instance
4070 a9e0c397 Iustin Pop
    iv_names = {}
4071 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4072 a9e0c397 Iustin Pop
    # start of work
4073 a9e0c397 Iustin Pop
    cfg = self.cfg
4074 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
4075 cff90b79 Iustin Pop
    oth_node = self.oth_node
4076 cff90b79 Iustin Pop
4077 cff90b79 Iustin Pop
    # Step: check device activation
4078 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4079 cff90b79 Iustin Pop
    info("checking volume groups")
4080 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
4081 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([oth_node, tgt_node])
4082 cff90b79 Iustin Pop
    if not results:
4083 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4084 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
4085 cff90b79 Iustin Pop
      res = results.get(node, False)
4086 cff90b79 Iustin Pop
      if not res or my_vg not in res:
4087 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4088 cff90b79 Iustin Pop
                                 (my_vg, node))
4089 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
4090 54155f52 Iustin Pop
      if idx not in self.op.disks:
4091 cff90b79 Iustin Pop
        continue
4092 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
4093 54155f52 Iustin Pop
        info("checking disk/%d on %s" % (idx, node))
4094 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
4095 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_find(node, dev):
4096 54155f52 Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s" %
4097 54155f52 Iustin Pop
                                   (idx, node))
4098 cff90b79 Iustin Pop
4099 cff90b79 Iustin Pop
    # Step: check other node consistency
4100 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4101 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
4102 54155f52 Iustin Pop
      if idx not in self.op.disks:
4103 cff90b79 Iustin Pop
        continue
4104 54155f52 Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, oth_node))
4105 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, oth_node,
4106 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
4107 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
4108 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
4109 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
4110 cff90b79 Iustin Pop
4111 cff90b79 Iustin Pop
    # Step: create new storage
4112 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4113 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
4114 54155f52 Iustin Pop
      if idx not in self.op.disks:
4115 a9e0c397 Iustin Pop
        continue
4116 a9e0c397 Iustin Pop
      size = dev.size
4117 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
4118 54155f52 Iustin Pop
      lv_names = [".disk%d_%s" % (idx, suf)
4119 54155f52 Iustin Pop
                  for suf in ["data", "meta"]]
4120 b9bddb6b Iustin Pop
      names = _GenerateUniqueNames(self, lv_names)
4121 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
4122 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
4123 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
4124 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
4125 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
4126 a9e0c397 Iustin Pop
      old_lvs = dev.children
4127 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
4128 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
4129 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
4130 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4131 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4132 a9e0c397 Iustin Pop
      # are talking about the secondary node
4133 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
4134 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
4135 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4136 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4137 a9e0c397 Iustin Pop
                                   " node '%s'" %
4138 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
4139 a9e0c397 Iustin Pop
4140 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
4141 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
4142 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
4143 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
4144 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
4145 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
4146 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
4147 cff90b79 Iustin Pop
      #dev.children = []
4148 cff90b79 Iustin Pop
      #cfg.Update(instance)
4149 a9e0c397 Iustin Pop
4150 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
4151 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
4152 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
4153 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
4154 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
4155 cff90b79 Iustin Pop
4156 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
4157 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
4158 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
4159 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
4160 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
4161 cff90b79 Iustin Pop
      rlist = []
4162 cff90b79 Iustin Pop
      for to_ren in old_lvs:
4163 72737a7f Iustin Pop
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
4164 cff90b79 Iustin Pop
        if find_res is not None: # device exists
4165 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
4166 cff90b79 Iustin Pop
4167 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
4168 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4169 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
4170 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
4171 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
4172 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
4173 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4174 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
4175 cff90b79 Iustin Pop
4176 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
4177 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
4178 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
4179 a9e0c397 Iustin Pop
4180 cff90b79 Iustin Pop
      for disk in old_lvs:
4181 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
4182 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
4183 a9e0c397 Iustin Pop
4184 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
4185 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
4186 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
4187 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
4188 72737a7f Iustin Pop
          if not self.rpc.call_blockdev_remove(tgt_node, new_lv):
4189 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
4190 cff90b79 Iustin Pop
                    " logical volumes")
4191 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
4192 a9e0c397 Iustin Pop
4193 a9e0c397 Iustin Pop
      dev.children = new_lvs
4194 a9e0c397 Iustin Pop
      cfg.Update(instance)
4195 a9e0c397 Iustin Pop
4196 cff90b79 Iustin Pop
    # Step: wait for sync
4197 a9e0c397 Iustin Pop
4198 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4199 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4200 a9e0c397 Iustin Pop
    # return value
4201 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4202 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4203 a9e0c397 Iustin Pop
4204 a9e0c397 Iustin Pop
    # so check manually all the devices
4205 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4206 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
4207 72737a7f Iustin Pop
      is_degr = self.rpc.call_blockdev_find(instance.primary_node, dev)[5]
4208 a9e0c397 Iustin Pop
      if is_degr:
4209 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4210 a9e0c397 Iustin Pop
4211 cff90b79 Iustin Pop
    # Step: remove old storage
4212 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4213 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4214 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
4215 a9e0c397 Iustin Pop
      for lv in old_lvs:
4216 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
4217 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_remove(tgt_node, lv):
4218 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
4219 a9e0c397 Iustin Pop
          continue
4220 a9e0c397 Iustin Pop
4221 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6
    # shortcuts to the processor's logging helpers; both take a message
    # and an optional hint= keyword
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps disk index -> (device, old child LVs, new drbd logical_id);
    # filled in step 4 and consumed in steps 5/6
    iv_names = {}
    # NOTE(review): 'vgname' is assigned but never used in this method
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([pri_node, new_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in pri_node, new_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    # only the disks selected in the opcode are verified
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d on %s" % (idx, pri_node))
      cfg.SetDiskID(dev, pri_node)
      if not self.rpc.call_blockdev_find(pri_node, dev):
        raise errors.OpExecError("Can't find disk/%d on node %s" %
                                 (idx, pri_node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, pri_node))
      # ldisk=True: check the local disk state, not just the drbd status
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      # NOTE(review): 'size' is assigned but never used in this loop
      size = dev.size
      info("adding new local storage on %s for disk/%d" %
           (new_node, idx))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in dev.children:
        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], new_node))

    # Step 4: dbrd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
                                   instance.name)
    logging.debug("Allocated minors %s" % (minors,))
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
      size = dev.size
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
      # create new devices on new_node
      # the drbd logical_id is a 6-tuple; keep the primary's slot
      # (node, port, minor) intact and substitute the new secondary's
      if pri_node == dev.logical_id[0]:
        new_logical_id = (pri_node, new_node,
                          dev.logical_id[2], dev.logical_id[3], new_minor,
                          dev.logical_id[5])
      else:
        new_logical_id = (new_node, pri_node,
                          dev.logical_id[2], new_minor, dev.logical_id[4],
                          dev.logical_id[5])
      iv_names[idx] = (dev, dev.children, new_logical_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_logical_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_logical_id,
                              children=dev.children)
      if not _CreateBlockDevOnSecondary(self, new_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        # error path: give back the reserved minors before aborting
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Failed to create new DRBD on"
                                 " node '%s'" % new_node)

    for idx, dev in enumerate(instance.disks):
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for disk/%d on old node" % idx)
      cfg.SetDiskID(dev, old_node)
      if not self.rpc.call_blockdev_shutdown(old_node, dev):
        # best-effort: a leftover device on the old node is only cosmetic
        warning("Failed to shutdown drbd for disk/%d on old node" % idx,
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    done = 0
    for idx, dev in enumerate(instance.disks):
      cfg.SetDiskID(dev, pri_node)
      # set the network part of the physical (unique in bdev terms) id
      # to None, meaning detach from network
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
      # and 'find' the device, which will 'fix' it to match the
      # standalone state
      if self.rpc.call_blockdev_find(pri_node, dev):
        done += 1
      else:
        warning("Failed to detach drbd disk/%d from network, unusual case" %
                idx)

    if not done:
      # no detaches succeeded (very unlikely)
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise errors.OpExecError("Can't detach at least one DRBD from old node")

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)
    # we can remove now the temp minors as now the new values are
    # written to the config file (and therefore stable)
    self.cfg.ReleaseDRBDMinors(instance.name)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    # NOTE(review): 'failures' is assigned but never appended to or read
    failures = []
    for idx, dev in enumerate(instance.disks):
      info("attaching primary drbd for disk/%d to new secondary node" % idx)
      # since the attach is smart, it's enough to 'find' the device,
      # it will automatically activate the network, if the physical_id
      # is correct
      cfg.SetDiskID(dev, pri_node)
      logging.debug("Disk to attach: %s", dev)
      if not self.rpc.call_blockdev_find(pri_node, dev):
        warning("can't attach drbd disk/%d to new secondary!" % idx,
                "please do a gnt-instance info to see the status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      # index 5 of the blockdev_find result is the degraded flag
      # (presumably; matches the _ExecD8DiskOnly usage above) — confirm
      is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      info("remove logical volumes for disk/%d" % idx)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        if not self.rpc.call_blockdev_remove(old_node, lv):
          # best-effort cleanup; failure only leaves stale LVs behind
          warning("Can't remove LV on old secondary",
                  hint="Cleanup stale volumes by hand")
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    inst = self.instance

    # A down instance has no active disks; activate them for the
    # duration of the replacement
    if inst.status == "down":
      _StartInstanceDisks(self, inst, True)

    if inst.disk_template != constants.DT_DRBD8:
      raise errors.ProgrammerError("Unhandled disk replacement case")

    # no remote node given => replace disks in place; otherwise move
    # the secondary to the requested node
    if self.op.remote_node is None:
      handler = self._ExecD8DiskOnly
    else:
      handler = self._ExecD8Secondary

    result = handler(feedback_fn)

    # Restore the original state: shut the disks back down if the
    # instance was down when we started
    if inst.status == "down":
      _SafeShutdownInstanceDisks(self, inst)

    return result
4434 a8083063 Iustin Pop
4435 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  # mandatory opcode parameters
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and declare the needed locks.

    Locks the instance and (lazily, via DeclareLocks) its nodes.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Compute the node locks once the instance lock is held."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.instance = instance

    # only lvm-backed templates can be grown
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)

    # every node holding a copy of the disk must have enough free space
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      # vg_free is in MiB, like op.amount; reject missing/non-numeric
      vg_free = info.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, info['vg_free'], self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    Grows the disk on every node holding it (secondaries first, then
    the primary), records the new size in the configuration and
    optionally waits for the resync to finish.

    """
    instance = self.instance
    disk = self.disk
    for node in (instance.secondary_nodes + (instance.primary_node,)):
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      # the RPC returns a (success, payload) pair; anything else is a
      # transport-level failure
      if (not result or not isinstance(result, (list, tuple)) or
          len(result) != 2):
        raise errors.OpExecError("grow request failed to node %s" % node)
      elif not result[0]:
        raise errors.OpExecError("grow request failed to node %s: %s" %
                                 (node, result[1]))
    # only update the config after all nodes succeeded
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        # not fatal: the grow itself succeeded, only the resync is dubious
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
4528 8729e0d7 Iustin Pop
4529 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  # 'instances' is the (possibly empty) list of instance names;
  # 'static' disables all RPC calls and returns config-only data
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and declare shared locks.

    An empty 'instances' list means "all instances" (locking.ALL_SET).

    """
    self.needed_locks = {}
    # read-only operation: share all locks
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          # FIX: this used self.op.instance_name, which does not exist
          # on this opcode (only 'instances' does) and would raise
          # AttributeError instead of reporting the unknown instance
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # resolved later, in CheckPrereq, from the acquired locks
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Compute the node locks once the instance locks are held."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" case: the acquired locks are the name list
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing the device and, recursively, its
    children; primary/secondary status are queried via RPC unless
    the operation is static.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        # live query: ask the primary node whether the instance runs
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        # FillHV/FillBE merge the cluster defaults into the
        # instance-level overrides
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
4664 a8083063 Iustin Pop
4665 a8083063 Iustin Pop
4666 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4667 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4668 a8083063 Iustin Pop

4669 a8083063 Iustin Pop
  """
4670 a8083063 Iustin Pop
  HPATH = "instance-modify"
4671 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4672 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
4673 1a5c7281 Guido Trotter
  REQ_BGL = False
4674 1a5c7281 Guido Trotter
4675 24991749 Iustin Pop
  def CheckArguments(self):
4676 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
4677 24991749 Iustin Pop
      self.op.nics = []
4678 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
4679 24991749 Iustin Pop
      self.op.disks = []
4680 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
4681 24991749 Iustin Pop
      self.op.beparams = {}
4682 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
4683 24991749 Iustin Pop
      self.op.hvparams = {}
4684 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
4685 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
4686 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
4687 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4688 24991749 Iustin Pop
4689 24991749 Iustin Pop
    for item in (constants.BE_MEMORY, constants.BE_VCPUS):
4690 24991749 Iustin Pop
      val = self.op.beparams.get(item, None)
4691 24991749 Iustin Pop
      if val is not None:
4692 24991749 Iustin Pop
        try:
4693 24991749 Iustin Pop
          val = int(val)
4694 24991749 Iustin Pop
        except ValueError, err:
4695 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid %s size: %s" % (item, str(err)))
4696 24991749 Iustin Pop
        self.op.beparams[item] = val
4697 24991749 Iustin Pop
    # Disk validation
4698 24991749 Iustin Pop
    disk_addremove = 0
4699 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
4700 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
4701 24991749 Iustin Pop
        disk_addremove += 1
4702 24991749 Iustin Pop
        continue
4703 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
4704 24991749 Iustin Pop
        disk_addremove += 1
4705 24991749 Iustin Pop
      else:
4706 24991749 Iustin Pop
        if not isinstance(disk_op, int):
4707 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
4708 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
4709 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
4710 24991749 Iustin Pop
        if mode not in (constants.DISK_RDONLY, constants.DISK_RDWR):
4711 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
4712 24991749 Iustin Pop
        size = disk_dict.get('size', None)
4713 24991749 Iustin Pop
        if size is None:
4714 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
4715 24991749 Iustin Pop
        try:
4716 24991749 Iustin Pop
          size = int(size)
4717 24991749 Iustin Pop
        except ValueError, err:
4718 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
4719 24991749 Iustin Pop
                                     str(err))
4720 24991749 Iustin Pop
        disk_dict['size'] = size
4721 24991749 Iustin Pop
      else:
4722 24991749 Iustin Pop
        # modification of disk
4723 24991749 Iustin Pop
        if 'size' in disk_dict:
4724 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
4725 24991749 Iustin Pop
                                     " grow-disk")
4726 24991749 Iustin Pop
4727 24991749 Iustin Pop
    if disk_addremove > 1:
4728 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
4729 24991749 Iustin Pop
                                 " supported at a time")
4730 24991749 Iustin Pop
4731 24991749 Iustin Pop
    # NIC validation
4732 24991749 Iustin Pop
    nic_addremove = 0
4733 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
4734 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
4735 24991749 Iustin Pop
        nic_addremove += 1
4736 24991749 Iustin Pop
        continue
4737 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
4738 24991749 Iustin Pop
        nic_addremove += 1
4739 24991749 Iustin Pop
      else:
4740 24991749 Iustin Pop
        if not isinstance(nic_op, int):
4741 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
4742 24991749 Iustin Pop
4743 24991749 Iustin Pop
      # nic_dict should be a dict
4744 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
4745 24991749 Iustin Pop
      if nic_ip is not None:
4746 24991749 Iustin Pop
        if nic_ip.lower() == "none":
4747 24991749 Iustin Pop
          nic_dict['ip'] = None
4748 24991749 Iustin Pop
        else:
4749 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
4750 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
4751 24991749 Iustin Pop
      # we can only check None bridges and assign the default one
4752 24991749 Iustin Pop
      nic_bridge = nic_dict.get('bridge', None)
4753 24991749 Iustin Pop
      if nic_bridge is None:
4754 24991749 Iustin Pop
        nic_dict['bridge'] = self.cfg.GetDefBridge()
4755 24991749 Iustin Pop
      # but we can validate MACs
4756 24991749 Iustin Pop
      nic_mac = nic_dict.get('mac', None)
4757 24991749 Iustin Pop
      if nic_mac is not None:
4758 24991749 Iustin Pop
        if self.cfg.IsMacInUse(nic_mac):
4759 24991749 Iustin Pop
          raise errors.OpPrereqError("MAC address %s already in use"
4760 24991749 Iustin Pop
                                     " in cluster" % nic_mac)
4761 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4762 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
4763 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
4764 24991749 Iustin Pop
    if nic_addremove > 1:
4765 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
4766 24991749 Iustin Pop
                                 " supported at a time")
4767 24991749 Iustin Pop
4768 1a5c7281 Guido Trotter
  def ExpandNames(self):
4769 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
4770 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
4771 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4772 74409b12 Iustin Pop
4773 74409b12 Iustin Pop
  def DeclareLocks(self, level):
4774 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
4775 74409b12 Iustin Pop
      self._LockInstancesNodes()
4776 a8083063 Iustin Pop
4777 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4778 a8083063 Iustin Pop
    """Build hooks env.
4779 a8083063 Iustin Pop

4780 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4781 a8083063 Iustin Pop

4782 a8083063 Iustin Pop
    """
4783 396e1b78 Michael Hanselmann
    args = dict()
4784 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
4785 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
4786 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
4787 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
4788 24991749 Iustin Pop
    # FIXME: readd disk/nic changes
4789 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
4790 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(),
4791 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4792 a8083063 Iustin Pop
    return env, nl, nl
4793 a8083063 Iustin Pop
4794 a8083063 Iustin Pop
  def CheckPrereq(self):
4795 a8083063 Iustin Pop
    """Check prerequisites.
4796 a8083063 Iustin Pop

4797 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4798 a8083063 Iustin Pop

4799 a8083063 Iustin Pop
    """
4800 24991749 Iustin Pop
    force = self.force = self.op.force
4801 a8083063 Iustin Pop
4802 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
4803 31a853d2 Iustin Pop
4804 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4805 1a5c7281 Guido Trotter
    assert self.instance is not None, \
4806 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4807 74409b12 Iustin Pop
    pnode = self.instance.primary_node
4808 74409b12 Iustin Pop
    nodelist = [pnode]
4809 74409b12 Iustin Pop
    nodelist.extend(instance.secondary_nodes)
4810 74409b12 Iustin Pop
4811 338e51e8 Iustin Pop
    # hvparams processing
4812 74409b12 Iustin Pop
    if self.op.hvparams:
4813 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
4814 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
4815 74409b12 Iustin Pop
        if val is None:
4816 74409b12 Iustin Pop
          try:
4817 74409b12 Iustin Pop
            del i_hvdict[key]
4818 74409b12 Iustin Pop
          except KeyError:
4819 74409b12 Iustin Pop
            pass
4820 74409b12 Iustin Pop
        else:
4821 74409b12 Iustin Pop
          i_hvdict[key] = val
4822 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4823 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
4824 74409b12 Iustin Pop
                                i_hvdict)
4825 74409b12 Iustin Pop
      # local check
4826 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
4827 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
4828 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
4829 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
4830 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
4831 338e51e8 Iustin Pop
    else:
4832 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
4833 338e51e8 Iustin Pop
4834 338e51e8 Iustin Pop
    # beparams processing
4835 338e51e8 Iustin Pop
    if self.op.beparams:
4836 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
4837 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
4838 338e51e8 Iustin Pop
        if val is None:
4839 338e51e8 Iustin Pop
          try:
4840 338e51e8 Iustin Pop
            del i_bedict[key]
4841 338e51e8 Iustin Pop
          except KeyError:
4842 338e51e8 Iustin Pop
            pass
4843 338e51e8 Iustin Pop
        else:
4844 338e51e8 Iustin Pop
          i_bedict[key] = val
4845 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4846 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4847 338e51e8 Iustin Pop
                                i_bedict)
4848 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
4849 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
4850 338e51e8 Iustin Pop
    else:
4851 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
4852 74409b12 Iustin Pop
4853 cfefe007 Guido Trotter
    self.warn = []
4854 647a5d80 Iustin Pop
4855 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
4856 647a5d80 Iustin Pop
      mem_check_list = [pnode]
4857 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4858 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
4859 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
4860 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
4861 72737a7f Iustin Pop
                                                  instance.hypervisor)
4862 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
4863 72737a7f Iustin Pop
                                         instance.hypervisor)
4864 cfefe007 Guido Trotter
4865 cfefe007 Guido Trotter
      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
4866 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
4867 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
4868 cfefe007 Guido Trotter
      else:
4869 cfefe007 Guido Trotter
        if instance_info:
4870 cfefe007 Guido Trotter
          current_mem = instance_info['memory']
4871 cfefe007 Guido Trotter
        else:
4872 cfefe007 Guido Trotter
          # Assume instance not running
4873 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
4874 cfefe007 Guido Trotter
          # and we have no other way to check)
4875 cfefe007 Guido Trotter
          current_mem = 0
4876 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
4877 338e51e8 Iustin Pop
                    nodeinfo[pnode]['memory_free'])
4878 cfefe007 Guido Trotter
        if miss_mem > 0:
4879 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
4880 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
4881 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
4882 cfefe007 Guido Trotter
4883 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4884 647a5d80 Iustin Pop
        for node in instance.secondary_nodes:
4885 647a5d80 Iustin Pop
          if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
4886 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
4887 647a5d80 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nodeinfo[node]['memory_free']:
4888 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
4889 647a5d80 Iustin Pop
                             " secondary node %s" % node)
4890 5bc84f33 Alexander Schreiber
4891 24991749 Iustin Pop
    # NIC processing
4892 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
4893 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
4894 24991749 Iustin Pop
        if not instance.nics:
4895 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
4896 24991749 Iustin Pop
        continue
4897 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
4898 24991749 Iustin Pop
        # an existing nic
4899 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
4900 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
4901 24991749 Iustin Pop
                                     " are 0 to %d" %
4902 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
4903 24991749 Iustin Pop
      nic_bridge = nic_dict.get('bridge', None)
4904 24991749 Iustin Pop
      if nic_bridge is not None:
4905 24991749 Iustin Pop
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
4906 24991749 Iustin Pop
          msg = ("Bridge '%s' doesn't exist on one of"
4907 24991749 Iustin Pop
                 " the instance nodes" % nic_bridge)
4908 24991749 Iustin Pop
          if self.force:
4909 24991749 Iustin Pop
            self.warn.append(msg)
4910 24991749 Iustin Pop
          else:
4911 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
4912 24991749 Iustin Pop
4913 24991749 Iustin Pop
    # DISK processing
4914 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
4915 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
4916 24991749 Iustin Pop
                                 " diskless instances")
4917 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
4918 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
4919 24991749 Iustin Pop
        if len(instance.disks) == 1:
4920 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
4921 24991749 Iustin Pop
                                     " an instance")
4922 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
4923 24991749 Iustin Pop
        ins_l = ins_l[pnode]
4924 24991749 Iustin Pop
        if not type(ins_l) is list:
4925 24991749 Iustin Pop
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
4926 24991749 Iustin Pop
        if instance.name in ins_l:
4927 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
4928 24991749 Iustin Pop
                                     " disks.")
4929 24991749 Iustin Pop
4930 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
4931 24991749 Iustin Pop
          len(instance.nics) >= constants.MAX_DISKS):
4932 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
4933 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
4934 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
4935 24991749 Iustin Pop
        # an existing disk
4936 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
4937 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
4938 24991749 Iustin Pop
                                     " are 0 to %d" %
4939 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
4940 24991749 Iustin Pop
4941 a8083063 Iustin Pop
    return
4942 a8083063 Iustin Pop
4943 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4944 a8083063 Iustin Pop
    """Modifies an instance.
4945 a8083063 Iustin Pop

4946 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4947 24991749 Iustin Pop

4948 a8083063 Iustin Pop
    """
4949 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
4950 cfefe007 Guido Trotter
    # feedback_fn there.
4951 cfefe007 Guido Trotter
    for warn in self.warn:
4952 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
4953 cfefe007 Guido Trotter
4954 a8083063 Iustin Pop
    result = []
4955 a8083063 Iustin Pop
    instance = self.instance
4956 24991749 Iustin Pop
    # disk changes
4957 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
4958 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
4959 24991749 Iustin Pop
        # remove the last disk
4960 24991749 Iustin Pop
        device = instance.disks.pop()
4961 24991749 Iustin Pop
        device_idx = len(instance.disks)
4962 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
4963 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
4964 24991749 Iustin Pop
          if not self.rpc.call_blockdev_remove(node, disk):
4965 24991749 Iustin Pop
            self.proc.LogWarning("Could not remove disk/%d on node %s,"
4966 24991749 Iustin Pop
                                 " continuing anyway", device_idx, node)
4967 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
4968 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
4969 24991749 Iustin Pop
        # add a new disk
4970 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
4971 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
4972 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
4973 24991749 Iustin Pop
        else:
4974 24991749 Iustin Pop
          file_driver = file_path = None
4975 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
4976 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
4977 24991749 Iustin Pop
                                         instance.disk_template,
4978 24991749 Iustin Pop
                                         instance, instance.primary_node,
4979 24991749 Iustin Pop
                                         instance.secondary_nodes,
4980 24991749 Iustin Pop
                                         [disk_dict],
4981 24991749 Iustin Pop
                                         file_path,
4982 24991749 Iustin Pop
                                         file_driver,
4983 24991749 Iustin Pop
                                         disk_idx_base)[0]
4984 24991749 Iustin Pop
        new_disk.mode = disk_dict['mode']
4985 24991749 Iustin Pop
        instance.disks.append(new_disk)
4986 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
4987 24991749 Iustin Pop
4988 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
4989 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
4990 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
4991 24991749 Iustin Pop
        #HARDCODE
4992 24991749 Iustin Pop
        for secondary_node in instance.secondary_nodes:
4993 24991749 Iustin Pop
          if not _CreateBlockDevOnSecondary(self, secondary_node, instance,
4994 24991749 Iustin Pop
                                            new_disk, False, info):
4995 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
4996 24991749 Iustin Pop
                            " secondary node %s!",
4997 24991749 Iustin Pop
                            new_disk.iv_name, new_disk, secondary_node)
4998 24991749 Iustin Pop
        #HARDCODE
4999 24991749 Iustin Pop
        if not _CreateBlockDevOnPrimary(self, instance.primary_node,
5000 24991749 Iustin Pop
                                        instance, new_disk, info):
5001 24991749 Iustin Pop
          self.LogWarning("Failed to create volume %s on primary!",
5002 24991749 Iustin Pop
                          new_disk.iv_name)
5003 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
5004 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
5005 24991749 Iustin Pop
      else:
5006 24991749 Iustin Pop
        # change a given disk
5007 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
5008 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
5009 24991749 Iustin Pop
    # NIC changes
5010 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5011 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5012 24991749 Iustin Pop
        # remove the last nic
5013 24991749 Iustin Pop
        del instance.nics[-1]
5014 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
5015 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
5016 24991749 Iustin Pop
        # add a new nic
5017 24991749 Iustin Pop
        if 'mac' not in nic_dict:
5018 24991749 Iustin Pop
          mac = constants.VALUE_GENERATE
5019 24991749 Iustin Pop
        else:
5020 24991749 Iustin Pop
          mac = nic_dict['mac']
5021 24991749 Iustin Pop
        if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5022 24991749 Iustin Pop
          mac = self.cfg.GenerateMAC()
5023 24991749 Iustin Pop
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
5024 24991749 Iustin Pop
                              bridge=nic_dict.get('bridge', None))
5025 24991749 Iustin Pop
        instance.nics.append(new_nic)
5026 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
5027 24991749 Iustin Pop
                       "add:mac=%s,ip=%s,bridge=%s" %
5028 24991749 Iustin Pop
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
5029 24991749 Iustin Pop
      else:
5030 24991749 Iustin Pop
        # change a given nic
5031 24991749 Iustin Pop
        for key in 'mac', 'ip', 'bridge':
5032 24991749 Iustin Pop
          if key in nic_dict:
5033 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
5034 24991749 Iustin Pop
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
5035 24991749 Iustin Pop
5036 24991749 Iustin Pop
    # hvparams changes
5037 74409b12 Iustin Pop
    if self.op.hvparams:
5038 74409b12 Iustin Pop
      instance.hvparams = self.hv_new
5039 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
5040 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
5041 24991749 Iustin Pop
5042 24991749 Iustin Pop
    # beparams changes
5043 338e51e8 Iustin Pop
    if self.op.beparams:
5044 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
5045 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
5046 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
5047 a8083063 Iustin Pop
5048 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
5049 a8083063 Iustin Pop
5050 a8083063 Iustin Pop
    return result
5051 a8083063 Iustin Pop
5052 a8083063 Iustin Pop
5053 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    # share the node locks: this is a read-only operation
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      wanted = _GetWantedNodes(self, self.op.nodes)
    else:
      # no explicit node list: query every node in the cluster
      wanted = locking.ALL_SET
    self.needed_locks[locking.LEVEL_NODE] = wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the set of nodes to query is exactly the set of nodes we locked
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    return self.rpc.call_export_list(self.nodes)
5087 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have do lock all nodes, as we don't know where
    # the previous export might be, and and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altoghether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    # add the standard instance environment on top of the export keys
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

    # resolve the destination node (possibly a short name)
    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    assert self.dst_node is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    Sequence: optionally shut the instance down, snapshot each disk,
    restart the instance (in a finally block, so snapshots failures do
    not leave it down), copy the snapshots to the target node, then
    remove any older export of the same instance from other nodes.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      if not self.rpc.call_instance_shutdown(src_node, instance):
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    # one entry per instance disk; False marks a failed snapshot (the
    # entry is still passed to call_finalize_export below)
    snap_disks = []

    try:
      for disk in instance.disks:
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)

        if not new_dev_name:
          self.LogWarning("Could not snapshot block device %s on node %s",
                          disk.logical_id[1], src_node)
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name),
                                 physical_id=(vgname, new_dev_name),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      # restart the instance only if we shut it down ourselves and the
      # configuration says it should be running
      if self.op.shutdown and instance.status == "up":
        if not self.rpc.call_instance_start(src_node, instance, None):
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        # copy the snapshot to the target node, then drop it from the
        # source node; both failures are only warnings
        if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                             instance, cluster_name, idx):
          self.LogWarning("Could not export block device %s from node %s to"
                          " node %s", dev.logical_id[1], src_node,
                          dst_node.name)
        if not self.rpc.call_blockdev_remove(src_node, dev):
          self.LogWarning("Could not remove snapshot block device %s from node"
                          " %s", dev.logical_id[1], src_node)

    if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if instance.name in exportlist[node]:
          # stale export from a previous run on another node: best-effort
          # removal, failure is only a warning
          if not self.rpc.call_export_remove(node, instance.name):
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)
5222 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # RemoveExport needs every node locked, but the instance itself is
    # never touched (we may be cleaning up exports of an instance that
    # has already been removed), so no instance lock is taken.
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    iname = self.cfg.ExpandInstanceName(self.op.instance_name)
    # When expansion fails, fall back to the literal name the caller
    # passed in; that only works if it was an FQDN.
    used_raw_name = not iname
    if used_raw_name:
      iname = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exports_by_node = self.rpc.call_export_list(locked_nodes)
    removed_any = False
    for node in exports_by_node:
      if iname not in exports_by_node[node]:
        continue
      removed_any = True
      if not self.rpc.call_export_remove(node, iname):
        logging.error("Could not remove export for instance %s"
                      " on node %s", iname, node)

    if used_raw_name and not removed_any:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
5267 9ac99fda Guido Trotter
5268 9ac99fda Guido Trotter
5269 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    # Node and instance names must be expanded (and locked); the cluster
    # target needs neither expansion nor a lock.
    self.needed_locks = {}
    kind = self.op.kind
    if kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # Resolve the object whose tags are being operated on and store it
    # in self.target for the child LUs.
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
5306 5c947f38 Iustin Pop
5307 5c947f38 Iustin Pop
5308 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq; return a fresh
    # list so callers cannot mutate the underlying tag collection.
    current_tags = self.target.GetTags()
    return list(current_tags)
5320 5c947f38 Iustin Pop
5321 5c947f38 Iustin Pop
5322 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
5323 73415719 Iustin Pop
  """Searches the tags for a given pattern.
5324 73415719 Iustin Pop

5325 73415719 Iustin Pop
  """
5326 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
5327 8646adce Guido Trotter
  REQ_BGL = False
5328 8646adce Guido Trotter
5329 8646adce Guido Trotter
  def ExpandNames(self):
5330 8646adce Guido Trotter
    self.needed_locks = {}
5331 73415719 Iustin Pop
5332 73415719 Iustin Pop
  def CheckPrereq(self):
5333 73415719 Iustin Pop
    """Check prerequisites.
5334 73415719 Iustin Pop

5335 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
5336 73415719 Iustin Pop

5337 73415719 Iustin Pop
    """
5338 73415719 Iustin Pop
    try:
5339 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
5340 73415719 Iustin Pop
    except re.error, err:
5341 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
5342 73415719 Iustin Pop
                                 (self.op.pattern, err))
5343 73415719 Iustin Pop
5344 73415719 Iustin Pop
  def Exec(self, feedback_fn):
5345 73415719 Iustin Pop
    """Returns the tag list.
5346 73415719 Iustin Pop

5347 73415719 Iustin Pop
    """
5348 73415719 Iustin Pop
    cfg = self.cfg
5349 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
5350 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
5351 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
5352 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
5353 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
5354 73415719 Iustin Pop
    results = []
5355 73415719 Iustin Pop
    for path, target in tgts:
5356 73415719 Iustin Pop
      for tag in target.GetTags():
5357 73415719 Iustin Pop
        if self.re.search(tag):
5358 73415719 Iustin Pop
          results.append((path, tag))
5359 73415719 Iustin Pop
    return results
5360 73415719 Iustin Pop
5361 73415719 Iustin Pop
5362 f27302fa Iustin Pop
class LUAddTags(TagsLU):
5363 5c947f38 Iustin Pop
  """Sets a tag on a given object.
5364 5c947f38 Iustin Pop

5365 5c947f38 Iustin Pop
  """
5366 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5367 8646adce Guido Trotter
  REQ_BGL = False
5368 5c947f38 Iustin Pop
5369 5c947f38 Iustin Pop
  def CheckPrereq(self):
5370 5c947f38 Iustin Pop
    """Check prerequisites.
5371 5c947f38 Iustin Pop

5372 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
5373 5c947f38 Iustin Pop

5374 5c947f38 Iustin Pop
    """
5375 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5376 f27302fa Iustin Pop
    for tag in self.op.tags:
5377 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5378 5c947f38 Iustin Pop
5379 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5380 5c947f38 Iustin Pop
    """Sets the tag.
5381 5c947f38 Iustin Pop

5382 5c947f38 Iustin Pop
    """
5383 5c947f38 Iustin Pop
    try:
5384 f27302fa Iustin Pop
      for tag in self.op.tags:
5385 f27302fa Iustin Pop
        self.target.AddTag(tag)
5386 5c947f38 Iustin Pop
    except errors.TagError, err:
5387 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
5388 5c947f38 Iustin Pop
    try:
5389 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5390 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5391 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5392 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5393 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5394 5c947f38 Iustin Pop
5395 5c947f38 Iustin Pop
5396 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    # Every requested tag must currently be present on the target.
    wanted = frozenset(self.op.tags)
    present = self.target.GetTags()
    missing = wanted - present
    if missing:
      missing_names = sorted("'%s'" % tag for tag in missing)
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(missing_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for del_tag in self.op.tags:
      self.target.RemoveTag(del_tag)
    # Persist the modified object; a concurrent config change means the
    # whole operation must be retried by the caller.
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
5433 06009e27 Iustin Pop
5434 0eed6e61 Guido Trotter
5435 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if not self.op.on_nodes:
      return
    # _GetWantedNodes can be used here, but is not always appropriate to use
    # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
    # more information.
    self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
    self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    duration = self.op.duration
    if self.op.on_master:
      if not utils.TestDelay(duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      node_results = self.rpc.call_test_delay(self.op.on_nodes, duration)
      if not node_results:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, outcome in node_results.items():
        if not outcome:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, outcome))
5479 d61df03e Iustin Pop
5480 d61df03e Iustin Pop
5481 d1c2dd75 Iustin Pop
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # Required keyword arguments for an "allocate" request
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  # Required keyword arguments for a "relocate" request
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, lu, mode, name, **kwargs):
    """Initialize the allocator and immediately build its input data.

    lu: the calling LogicalUnit (gives access to cfg and rpc)
    mode: one of constants.IALLOCATOR_MODE_ALLOC/_RELOC
    name: instance name the request is about
    kwargs: exactly the keys of _ALLO_KEYS or _RELO_KEYS, per mode

    """
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # kwargs must match the mode's keyset exactly: no extras, none missing
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    Fills self.in_data with cluster, node and instance information
    gathered via the configuration and two RPC calls.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": 1,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    # NOTE(review): "iinfo" is reused as a loop variable further down,
    # shadowing this list handle -- harmless, since it is only consumed
    # by the comprehension below, but confusing to read.
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    # __init__ guarantees mode is one of these two, so "hypervisor" is
    # always bound before use below.
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname in node_list:
      ninfo = cfg.GetNodeInfo(nname)
      if nname not in node_data or not isinstance(node_data[nname], dict):
        raise errors.OpExecError("Can't get data for node %s" % nname)
      remote_info = node_data[nname]
      # every required attribute must be present and integer-convertible;
      # the conversion mutates the RPC result dict in place
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
                   'vg_size', 'vg_free', 'cpu_total']:
        if attr not in remote_info:
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
                                   (nname, attr))
        try:
          remote_info[attr] = int(remote_info[attr])
        except ValueError, err:
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
                                   " %s" % (nname, attr, str(err)))
      # compute memory used by primary instances
      i_p_mem = i_p_up_mem = 0
      for iinfo, beinfo in i_list:
        if iinfo.primary_node == nname:
          i_p_mem += beinfo[constants.BE_MEMORY]
          if iinfo.name not in node_iinfo[nname]:
            # instance configured here but not reported by the hypervisor
            i_used_mem = 0
          else:
            i_used_mem = int(node_iinfo[nname][iinfo.name]['memory'])
          # reserve the memory an instance is entitled to but not yet using
          i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
          remote_info['memory_free'] -= max(0, i_mem_diff)

          if iinfo.status == "up":
            i_p_up_mem += beinfo[constants.BE_MEMORY]

      # compute memory used by instances
      pnr = {
        "tags": list(ninfo.GetTags()),
        "total_memory": remote_info['memory_total'],
        "reserved_memory": remote_info['memory_dom0'],
        "free_memory": remote_info['memory_free'],
        "i_pri_memory": i_p_mem,
        "i_pri_up_memory": i_p_up_mem,
        "total_disk": remote_info['vg_size'],
        "free_disk": remote_info['vg_free'],
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "total_cpus": remote_info['cpu_total'],
        }
      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "should_run": iinfo.status == "up",
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _AllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data
    if len(self.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    # mirrored templates need a secondary node as well
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _IAllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # relocation only makes sense for mirrored instances with exactly
    # one secondary node
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    Computes the cluster-wide data, appends the mode-specific request
    and serializes the result into self.in_text.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    name: name of the iallocator script to run on the master node
    validate: when True, parse and validate the script's output
    call_fn: override for the RPC call (used by tests)

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner
    # NOTE(review): "data" is unused; the call below passes self.in_text
    # directly.
    data = self.in_text

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)

    # expected shape: (rcode, stdout, stderr, fail)
    if not isinstance(result, (list, tuple)) or len(result) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # mirror the mandatory result keys onto instance attributes
    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
5767 538475ca Iustin Pop
5768 538475ca Iustin Pop
5769 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # an allocation request must fully describe the new instance
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      # 'hypervisor' is optional (it is not in the required-attribute loop
      # above), so use getattr with a default instead of a bare attribute
      # read, which would raise AttributeError when the opcode omits it
      if getattr(self.op, "hypervisor", None) is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      # an "out" test actually invokes the external script, so the
      # allocator name must be present and set
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      # "in" direction: only return the request that would be sent
      result = ial.in_text
    else:
      # "out" direction: run the allocator and return its raw output
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result