Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 8b3fd458

History | View | Annotate | Download (184.8 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 ffa1c0dc Iustin Pop
import logging
34 74409b12 Iustin Pop
import copy
35 a8083063 Iustin Pop
36 a8083063 Iustin Pop
from ganeti import ssh
37 a8083063 Iustin Pop
from ganeti import logger
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 6048c986 Guido Trotter
from ganeti import locking
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 a8083063 Iustin Pop
from ganeti import opcodes
45 8d14b30d Iustin Pop
from ganeti import serializer
46 d61df03e Iustin Pop
47 d61df03e Iustin Pop
48 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  # Hook path/type; a None HPATH means BuildHooksEnv is never called
  HPATH = None
  HTYPE = None
  # Opcode attributes that must be present (and not None) on self.op
  _OP_REQP = []
  REQ_MASTER = True
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # every level defaults to 0 (exclusive); LUs set a true value to share
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # lazily-created SshRunner; see the ssh property below
    self.__ssh = None

    # validate that every required opcode parameter is present
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = self.cfg.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    # created on first use, then cached for the LU's lifetime
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, ecc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    If should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]
310 c4a2fee1 Guido Trotter
311 a8083063 Iustin Pop
312 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  # A None HPATH means the hooks machinery never calls BuildHooksEnv
  HPATH = None
  HTYPE = None
321 a8083063 Iustin Pop
322 a8083063 Iustin Pop
323 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    lu: the LogicalUnit on whose behalf we operate (its cfg is used
      for name expansion)
    nodes: Non-empty list of node names (strings) to expand

  Returns:
    A NiceSort-ed list of the expanded node names.

  Raises:
    errors.OpPrereqError: if the argument is not a list or contains an
      unknown node name
    errors.ProgrammerError: if called with an empty or None node list;
      callers that want "all nodes" must not go through this helper

  """
  # NOTE: the original docstring claimed "None for all"; the code has
  # always rejected empty input, so the documentation above was fixed
  # to match the actual contract.
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)
345 3312b702 Iustin Pop
346 3312b702 Iustin Pop
347 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  # An empty (or None-ish) list means "all instances in the cluster".
  if not instances:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for inst_name in instances:
    expanded = lu.cfg.ExpandInstanceName(inst_name)
    if expanded is None:
      raise errors.OpPrereqError("No such instance name '%s'" % inst_name)
    wanted.append(expanded)
  return utils.NiceSort(wanted)
369 dcb93971 Michael Hanselmann
370 dcb93971 Michael Hanselmann
371 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
372 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
373 83120a01 Michael Hanselmann

374 83120a01 Michael Hanselmann
  Args:
375 83120a01 Michael Hanselmann
    static: Static fields
376 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
377 83120a01 Michael Hanselmann

378 83120a01 Michael Hanselmann
  """
379 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
380 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
381 dcb93971 Michael Hanselmann
382 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
383 dcb93971 Michael Hanselmann
384 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
385 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
386 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
387 3ecf6786 Iustin Pop
                                          difference(all_fields)))
388 dcb93971 Michael Hanselmann
389 dcb93971 Michael Hanselmann
390 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
391 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
392 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
393 ecb215b5 Michael Hanselmann

394 ecb215b5 Michael Hanselmann
  Args:
395 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
396 396e1b78 Michael Hanselmann
  """
397 396e1b78 Michael Hanselmann
  env = {
398 0e137c28 Iustin Pop
    "OP_TARGET": name,
399 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
400 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
401 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
402 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
403 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
404 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
405 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
406 396e1b78 Michael Hanselmann
  }
407 396e1b78 Michael Hanselmann
408 396e1b78 Michael Hanselmann
  if nics:
409 396e1b78 Michael Hanselmann
    nic_count = len(nics)
410 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
411 396e1b78 Michael Hanselmann
      if ip is None:
412 396e1b78 Michael Hanselmann
        ip = ""
413 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
414 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
415 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
416 396e1b78 Michael Hanselmann
  else:
417 396e1b78 Michael Hanselmann
    nic_count = 0
418 396e1b78 Michael Hanselmann
419 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
420 396e1b78 Michael Hanselmann
421 396e1b78 Michael Hanselmann
  return env
422 396e1b78 Michael Hanselmann
423 396e1b78 Michael Hanselmann
424 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns:
    Dict of environment variables, as built by _BuildInstanceHookEnv.

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # FIX: was 'instance.os', which duplicated os_type and exported the
    # OS name as INSTANCE_STATUS; the run state is instance.status
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
444 396e1b78 Michael Hanselmann
445 396e1b78 Michael Hanselmann
446 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  Raises errors.OpPrereqError if any bridge used by the instance's NICs
  is missing on its primary node.

  """
  # Every NIC's bridge must be present on the instance's primary node.
  bridges = [nic.bridge for nic in instance.nics]
  if not lu.rpc.call_bridges_exist(instance.primary_node, bridges):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (bridges, instance.primary_node))
456 bf6929a2 Alexander Schreiber
457 bf6929a2 Alexander Schreiber
458 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    # The cluster may only contain the master node itself...
    nodes = self.cfg.GetNodeList()
    if nodes != [master]:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodes) - 1))

    # ...and must hold no instances at all.
    instances = self.cfg.GetInstanceList()
    if instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    # Drop the master role first; if this fails the cluster is still
    # live and we must not continue tearing it down.
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    # Keep backup copies of the cluster ssh keys before they are lost.
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for keyfile in (priv_key, pub_key):
      utils.CreateBackup(keyfile)
    return master
494 a8083063 Iustin Pop
495 a8083063 Iustin Pop
496 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  # the opcode must carry the (possibly empty) list of optional checks to skip
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    """Lock every node and every instance for the verification run."""
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # presumably 1 marks each level as shared, matching the attribute
    # name — verify against the locking module
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
511 a8083063 Iustin Pop
512 a8083063 Iustin Pop
  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: the node's volume-group data, or a false value if the
        query failed
      node_result: dict of results from the node-verify RPC call
      remote_version: the node's reported protocol version, or a false
        value if the node was unreachable

    Returns:
      True if any problem was found (or the node was unreachable),
      False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # unreachable node: report and mark bad immediately
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      # CheckVolumeGroupSize returns an error string, or a false value if OK
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    if not node_result:
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # NOTE: this loop rebinds the 'node' parameter to the peer names
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        # NOTE: 'node' is again rebound to the peer names here
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          # NOTE(review): hypervisor verify failures are reported but do
          # not set 'bad' — confirm whether that is intentional
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))
    return bad
600 a8083063 Iustin Pop
601 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify a single instance.

    Checks that every logical volume the instance needs is present on
    the right node, that the instance runs on its primary node unless it
    is marked down, and that it is not running anywhere else.

    """
    bad = False
    primary = instanceconfig.primary_node

    # build the node -> expected volumes mapping for this instance
    expected_vols = {}
    instanceconfig.MapLVsByNode(expected_vols)

    for nname in expected_vols:
      present = node_vol_is.get(nname, [])
      for volume in expected_vols[nname]:
        if volume not in present:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, nname))
          bad = True

    if instanceconfig.status != 'down':
      running_on_primary = (primary in node_instance and
                            instance in node_instance[primary])
      if not running_on_primary:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, primary))
        bad = True

    # the instance must not be running on any other node
    for nname in node_instance:
      if nname != primary and instance in node_instance[nname]:
        feedback_fn("  - ERROR: instance %s should not run on node %s" %
                        (instance, nname))
        bad = True

    return bad
638 a8083063 Iustin Pop
639 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    found_orphans = False

    # every volume actually present must be expected on that node
    for nname in node_vol_is:
      expected = node_vol_should.get(nname, [])
      for volume in node_vol_is[nname]:
        if volume not in expected:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, nname))
          found_orphans = True
    return found_orphans
655 a8083063 Iustin Pop
656 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    orphan_found = False
    for nname in node_instance:
      for iname in node_instance[nname]:
        if iname in instancelist:
          continue
        # running instance the configuration knows nothing about
        feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                        (iname, nname))
        orphan_found = True
    return orphan_found
670 a8083063 Iustin Pop
671 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    failed = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = sum(instance_cfg[iname].memory for iname in instances)
        if nodeinfo['mfree'] < needed_mem:
          failed = True
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
    return failed
698 2b3b6ddd Guido Trotter
699 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    # anything outside the optional-check set is a caller error
    invalid_skips = self.skip_set - constants.VERIFY_OPTIONAL_CHECKS
    if invalid_skips:
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
709 a8083063 Iustin Pop
710 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure
    makes the output be logged in the verify output and the verification
    to fail.

    """
    # TODO: populate the environment with useful information for verify hooks
    return {}, [], self.cfg.GetNodeList()
721 d8fff41c Guido Trotter
722 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Gathers per-node data over RPC (volumes, instances, node info,
    node-verify results), runs the _Verify* helpers over it, and reports
    every problem via feedback_fn.

    Returns:
      True if the cluster verified without problems, False otherwise.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = []
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors)
    all_vglist = self.rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': hypervisors,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    all_rversion = self.rpc.call_version(nodelist)
    all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
                                        self.cfg.GetHypervisorType())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a string result means an LVM error message from the node
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      # NOTE: this rebinds 'nodeinfo' (the list built above) to the
      # current node's info dict; the list is no longer needed here
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return not bad
886 a8083063 Iustin Pop
887 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    Returns:
      the (possibly updated) lu_result for the POST phase; for any other
      phase nothing is returned (implicit None).

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          # each entry is a (script, status, output) triple
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              # indent the hook's output under its header line
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result
928 d8fff41c Guido Trotter
929 a8083063 Iustin Pop
930 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    """Lock every node and every instance for the disk verification."""
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # presumably 1 marks each level as shared, matching the attribute
    # name — verify against the locking module
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass
951 2c95a8d4 Iustin Pop
952 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns a tuple of four result containers:
      - res_nodes: list of nodes that failed to answer or returned
        invalid data
      - res_nlvm: dict of node -> LVM error string
      - res_instances: list of instance names having at least one
        offline logical volume
      - res_missing: dict of instance name -> list of (node, volume)
        pairs for expected-but-absent volumes

    """
    # 'result' aliases the four per-category containers, so filling them
    # in below also fills the returned tuple
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # map every (node, volume) pair we expect to its owning instance;
    # only up, network-mirrored instances are considered
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        # the error string carries no volume data; without this continue
        # the iteritems() call below would fail on the string
        continue
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        # pop so that whatever remains in nv_dict afterwards is missing
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1008 2c95a8d4 Iustin Pop
1009 2c95a8d4 Iustin Pop
1010 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  Changes the cluster name (and, when the new name resolves to a
  different address, the master IP), then distributes the updated
  ssconf data to all nodes.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  # The opcode must carry the new cluster name.
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    Hooks run only on the master node, with both the current and the
    new cluster name exported.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves the new name and refuses the rename if nothing would
    change, or if the new IP already answers on the network.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    # Remember the resolved IP for Exec().
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # A reachable new IP means something else is already using it.
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    # Store the canonical (resolved) name back into the opcode.
    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      # TODO: sstore
      # NOTE(review): 'ss' is not defined anywhere in this method (see the
      # TODO above) -- these SetKey/KeyToFilename calls raise NameError if
      # reached; the ssconf migration is unfinished at this revision.
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = self.rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            # best-effort distribution: log and continue with other nodes
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # Always try to restore the master role, even if the rename failed.
      if not self.rpc.call_node_start_master(master, False):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1086 07bd8a51 Iustin Pop
1087 07bd8a51 Iustin Pop
1088 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  # A match anywhere in the subtree counts, so walk the children first.
  for child in (disk.children or []):
    if _RecursiveCheckIfLVMBased(child):
      return True
  # No LVM-based descendant: the answer depends on this disk alone.
  return disk.dev_type == constants.LD_LV
1103 8084f9f6 Manuel Franceschini
1104 8084f9f6 Manuel Franceschini
1105 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    Runs on the master node only, exporting the proposed volume group.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    master = self.cfg.GetMasterNode()
    return env, [master], [master]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if self.op.vg_name:
      # A volume group was given: verify its presence and size on every
      # locked node.
      node_list = self.acquired_locks[locking.LEVEL_NODE]
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))
    else:
      # Disabling LVM: refuse while any instance still has an lvm-based disk.
      for inst in self.cfg.GetAllInstancesInfo().values():
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name == self.cfg.GetVGName():
      feedback_fn("Cluster LVM configuration already in desired"
                  " state, not changing")
    else:
      self.cfg.SetVGName(self.op.vg_name)
1170 8084f9f6 Manuel Franceschini
1171 8084f9f6 Manuel Franceschini
1172 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1173 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1174 a8083063 Iustin Pop

1175 a8083063 Iustin Pop
  """
1176 a8083063 Iustin Pop
  if not instance.disks:
1177 a8083063 Iustin Pop
    return True
1178 a8083063 Iustin Pop
1179 a8083063 Iustin Pop
  if not oneshot:
1180 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1181 a8083063 Iustin Pop
1182 a8083063 Iustin Pop
  node = instance.primary_node
1183 a8083063 Iustin Pop
1184 a8083063 Iustin Pop
  for dev in instance.disks:
1185 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
1186 a8083063 Iustin Pop
1187 a8083063 Iustin Pop
  retries = 0
1188 a8083063 Iustin Pop
  while True:
1189 a8083063 Iustin Pop
    max_time = 0
1190 a8083063 Iustin Pop
    done = True
1191 a8083063 Iustin Pop
    cumul_degraded = False
1192 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1193 a8083063 Iustin Pop
    if not rstats:
1194 b9bddb6b Iustin Pop
      lu.proc.LogWarning("Can't get any data from node %s" % node)
1195 a8083063 Iustin Pop
      retries += 1
1196 a8083063 Iustin Pop
      if retries >= 10:
1197 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1198 3ecf6786 Iustin Pop
                                 " aborting." % node)
1199 a8083063 Iustin Pop
      time.sleep(6)
1200 a8083063 Iustin Pop
      continue
1201 a8083063 Iustin Pop
    retries = 0
1202 a8083063 Iustin Pop
    for i in range(len(rstats)):
1203 a8083063 Iustin Pop
      mstat = rstats[i]
1204 a8083063 Iustin Pop
      if mstat is None:
1205 b9bddb6b Iustin Pop
        lu.proc.LogWarning("Can't compute data for node %s/%s" %
1206 b9bddb6b Iustin Pop
                           (node, instance.disks[i].iv_name))
1207 a8083063 Iustin Pop
        continue
1208 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1209 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1210 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1211 a8083063 Iustin Pop
      if perc_done is not None:
1212 a8083063 Iustin Pop
        done = False
1213 a8083063 Iustin Pop
        if est_time is not None:
1214 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1215 a8083063 Iustin Pop
          max_time = est_time
1216 a8083063 Iustin Pop
        else:
1217 a8083063 Iustin Pop
          rem_time = "no time estimate"
1218 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1219 b9bddb6b Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
1220 a8083063 Iustin Pop
    if done or oneshot:
1221 a8083063 Iustin Pop
      break
1222 a8083063 Iustin Pop
1223 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1224 a8083063 Iustin Pop
1225 a8083063 Iustin Pop
  if done:
1226 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1227 a8083063 Iustin Pop
  return not cumul_degraded
1228 a8083063 Iustin Pop
1229 a8083063 Iustin Pop
1230 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1231 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1232 a8083063 Iustin Pop

1233 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1234 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1235 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1236 0834c866 Iustin Pop

1237 a8083063 Iustin Pop
  """
1238 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1239 0834c866 Iustin Pop
  if ldisk:
1240 0834c866 Iustin Pop
    idx = 6
1241 0834c866 Iustin Pop
  else:
1242 0834c866 Iustin Pop
    idx = 5
1243 a8083063 Iustin Pop
1244 a8083063 Iustin Pop
  result = True
1245 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1246 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1247 a8083063 Iustin Pop
    if not rstats:
1248 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1249 a8083063 Iustin Pop
      result = False
1250 a8083063 Iustin Pop
    else:
1251 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1252 a8083063 Iustin Pop
  if dev.children:
1253 a8083063 Iustin Pop
    for child in dev.children:
1254 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1255 a8083063 Iustin Pop
1256 a8083063 Iustin Pop
  return result
1257 a8083063 Iustin Pop
1258 a8083063 Iustin Pop
1259 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
    _CheckOutputFields(static=[],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # Every node is queried, so take a shared lock over the whole node level.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remap a per-node result map into a per-os, per-node dictionary.

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        map: a map with osnames as keys and as value another map, with
             nodes as keys and list of OS objects as values
             e.g. {"debian-etch": {"node1": [<object>,...],
                                   "node2": [<object>,]}
                  }

    """
    remapped = {}
    for node_name, nr in rlist.iteritems():
      if not nr:
        # skip nodes that returned nothing
        continue
      for os_obj in nr:
        if os_obj.name not in remapped:
          # first occurrence of this OS: pre-create an empty list for
          # every known node so absent nodes are still represented
          remapped[os_obj.name] = dict([(nname, []) for nname in node_list])
        remapped[os_obj.name][node_name].append(os_obj)
    return remapped

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = self.rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # an OS is valid only if every node reported a valid first entry
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          status_map = {}
          for node_name, nos_list in os_data.iteritems():
            status_map[node_name] = [(v.status, v.path) for v in nos_list]
          val = status_map
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1343 a8083063 Iustin Pop
1344 a8083063 Iustin Pop
1345 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    # Run the hooks on every node except the one being removed.
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # fixed: was the deprecated "raise Exc, arg" comma form, inconsistent
      # with every other raise in this module
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    # Store the expanded name and the node object for Exec().
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    # Drop the node from the cluster context first, then tell the node
    # itself to leave the cluster.
    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)
1412 c8a0948f Michael Hanselmann
1413 a8083063 Iustin Pop
1414 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    # Fields that need a live RPC to the nodes to answer.
    self.dynamic_fields = frozenset([
      "dtotal", "dfree",
      "mtotal", "mnode", "mfree",
      "bootid",
      "ctotal",
      ])

    # Fields answerable purely from the configuration.
    self.static_fields = frozenset([
      "name", "pinst_cnt", "sinst_cnt",
      "pinst_list", "sinst_list",
      "pip", "sip", "tags",
      "serial_no",
      ])

    _CheckOutputFields(static=self.static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      # Locks were taken: the held node locks define the node list.
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      # Lockless query for explicit names: re-check they still exist.
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    # Only issue the live RPC if at least one dynamic field was requested.
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          # node did not answer: leave its live data empty
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    # Per-node sets of primary/secondary instance names.
    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      # Instance data is only needed when an instance field was requested.
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    # Build one output row per node, in the requested field order.
    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field in self.dynamic_fields:
          # missing live value (node down) is reported as None
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1550 a8083063 Iustin Pop
1551 a8083063 Iustin Pop
1552 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False

  def ExpandNames(self):
    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

    # shared node locks: either the requested nodes or all of them
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # the node list is whatever we managed to lock
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    # map each instance to its per-node LV usage, so that the
    # "instance" output field can be resolved by reverse lookup
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      if node not in volumes or not volumes[node]:
        # unreachable node or no volumes reported: skip silently
        continue

      for vol in sorted(volumes[node], key=lambda vol: vol['dev']):
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find which instance (if any) owns this LV on this node
            val = '-'
            for inst in ilist:
              if vol['name'] in lv_by_node[inst].get(node, []):
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
1629 dcb93971 Michael Hanselmann
1630 dcb93971 Michael Hanselmann
1631 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current members only, post-hooks also on
    # the node being added
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; presumably this raises on DNS failure
    # (verify in utils.HostInfo)
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    # the resolved primary IP is stored back on the opcode so that
    # BuildHooksEnv can export it
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # no secondary given: single-homed setup, reuse the primary
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    # make sure the new node's addresses don't collide with any
    # existing node (except itself, in the readd case)
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # a readded node must keep its previous IP configuration
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # node object to be added to the config in Exec
    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    The steps, in order: verify protocol version, push ssh host/user
    keys, update /etc/hosts, verify the secondary IP, run a remote
    node-verify, distribute cluster files, and finally register the
    node in the configuration.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # order matters: call_node_add expects dsa priv/pub, rsa priv/pub,
    # then the cluster user's priv/pub keys, in this exact sequence
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      # dual-homed node: make sure it actually owns the secondary IP
      if not self.rpc.call_node_has_ip_address(new_node.name,
                                               new_node.secondary_ip):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # ask the master node to verify ssh/hostname setup of the new node
    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if not result[verifier]:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier]['nodelist']:
        for failed in result[verifier]['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier]['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      # on readd the node is already in the list; avoid a duplicate
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      # the master already has the current files
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # best-effort distribution: log and continue
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    # files only needed by some hypervisors
    to_copy = []
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if not result[node]:
        logger.Error("could not copy file %s to node %s" % (fname, node))

    # finally register the node in the config and context
    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
1831 a8083063 Iustin Pop
1832 a8083063 Iustin Pop
1833 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False
  REQ_BGL = False

  def ExpandNames(self):
    # purely a read-only query, no locks needed
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster_info = {
      "name": self.cfg.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": self.cfg.GetMasterNode(),
      "architecture": (platform.architecture()[0], platform.machine()),
      "hypervisor_type": self.cfg.GetHypervisorType(),
      "enabled_hypervisors": self.cfg.GetClusterInfo().enabled_hypervisors,
      }
    return cluster_info
1868 a8083063 Iustin Pop
1869 a8083063 Iustin Pop
1870 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=["cluster_name", "master_node"],
                       dynamic=[],
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    # dispatch table mapping each supported field to its getter
    getters = {
      "cluster_name": self.cfg.GetClusterName,
      "master_node": self.cfg.GetMasterNode,
      }
    values = []
    for field in self.op.output_fields:
      if field not in getters:
        raise errors.ParameterError(field)
      values.append(getters[field]())
    return values
1904 a8083063 Iustin Pop
1905 a8083063 Iustin Pop
1906 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    assembled, disks_info = _AssembleInstanceDisks(self, self.instance)
    if not assembled:
      raise errors.OpExecError("Cannot activate block devices")
    return disks_info
1941 a8083063 Iustin Pop
1942 a8083063 Iustin Pop
1943 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @param instance: a ganeti.objects.Instance object
  @type ignore_secondaries: C{bool}
  @param ignore_secondaries: if true, errors on secondary nodes won't
      result in an error return from the function
  @return: a tuple (disks_ok, device_info); disks_ok is False if the
      operation failed, device_info is a list of (host,
      instance_visible_name, node_visible_name) tuples with the mapping
      from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
        disks_ok = False
    # 'result' here is the primary-node assemble result from the loop
    # above (the primary is always present in the node tree)
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2003 a8083063 Iustin Pop
2004 a8083063 Iustin Pop
2005 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  On assembly failure, the already-assembled disks are shut down
  again and an OpExecError is raised.

  """
  assembled, _ = _AssembleInstanceDisks(lu, instance,
                                        ignore_secondaries=force)
  if assembled:
    return
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2017 fe7b0351 Michael Hanselmann
2018 fe7b0351 Michael Hanselmann
2019 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    _SafeShutdownInstanceDisks(self, self.instance)
2051 a8083063 Iustin Pop
2052 a8083063 Iustin Pop
2053 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @param instance: a ganeti.objects.Instance object
  @raise errors.OpExecError: if the primary node cannot be contacted
      or the instance is still running

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                    [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  # a non-list result means the RPC failed; use isinstance instead of
  # an exact type() check so list subclasses are also accepted
  if not isinstance(ins_l, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2072 a8083063 Iustin Pop
2073 a8083063 Iustin Pop
2074 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  A failed shutdown makes the result False, except that failures on
  the primary node are tolerated when ignore_primary is set.

  """
  success = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      if lu.rpc.call_blockdev_shutdown(node, top_disk):
        continue
      logger.Error("could not shutdown block device %s on node %s" %
                   (disk.iv_name, node))
      if node != instance.primary_node or not ignore_primary:
        success = False
  return success
2093 a8083063 Iustin Pop
2094 a8083063 Iustin Pop
2095 b9bddb6b Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
2096 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2097 d4f16fd9 Iustin Pop

2098 d4f16fd9 Iustin Pop
  This function check if a given node has the needed amount of free
2099 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2100 d4f16fd9 Iustin Pop
  information from the node, this function raise an OpPrereqError
2101 d4f16fd9 Iustin Pop
  exception.
2102 d4f16fd9 Iustin Pop

2103 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
2104 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2105 e69d05fd Iustin Pop
  @type node: C{str}
2106 e69d05fd Iustin Pop
  @param node: the node to check
2107 e69d05fd Iustin Pop
  @type reason: C{str}
2108 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2109 e69d05fd Iustin Pop
  @type requested: C{int}
2110 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2111 e69d05fd Iustin Pop
  @type hypervisor: C{str}
2112 e69d05fd Iustin Pop
  @param hypervisor: the hypervisor to ask for memory stats
2113 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2114 e69d05fd Iustin Pop
      we cannot check the node
2115 d4f16fd9 Iustin Pop

2116 d4f16fd9 Iustin Pop
  """
2117 72737a7f Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
2118 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2119 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2120 d4f16fd9 Iustin Pop
                             " information" % (node,))
2121 d4f16fd9 Iustin Pop
2122 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2123 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2124 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2125 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2126 d4f16fd9 Iustin Pop
  if requested > free_mem:
2127 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2128 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2129 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2130 d4f16fd9 Iustin Pop
2131 d4f16fd9 Iustin Pop
2132 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present
  _OP_REQP = ["instance_name", "force"]
  # fine-grained locking is used instead of the big ganeti lock
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance; node locks are declared empty here and
    # recalculated (LOCKS_REPLACE) once the instance lock is held
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # at node level, lock all nodes of the already-locked instance
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its bridges
    exist and that the primary node has enough free memory for it.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         instance.memory, instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    # extra_args is optional on the opcode; default to empty string
    extra_args = getattr(self.op, "extra_args", "")

    # record the new administrative state before performing the start
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    # if the start RPC fails, tear the disks down again before aborting
    if not self.rpc.call_instance_start(node_current, instance, extra_args):
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance")
2198 a8083063 Iustin Pop
2199 a8083063 Iustin Pop
2200 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  # fine-grained locking is used instead of the big ganeti lock
  REQ_BGL = False

  def ExpandNames(self):
    # validate the reboot type early, before acquiring any locks
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # FIX: this used to be "not constants.INSTANCE_REBOOT_FULL", which is
      # always False (the constant is a non-empty string), so all instance
      # nodes were locked for every reboot type. Only a full reboot touches
      # the disks on secondary nodes; soft/hard reboots need just the
      # primary node.
      primary_only = self.op.reboot_type != constants.INSTANCE_REBOOT_FULL
      self._LockInstancesNodes(primary_only=primary_only)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    # extra_args is optional on the opcode; default to empty string
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboots are handled entirely by the primary node
      if not self.rpc.call_instance_reboot(node_current, instance,
                                           reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: shutdown instance and disks, then start them again
      if not self.rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      if not self.rpc.call_instance_start(node_current, instance, extra_args):
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2279 bf6929a2 Alexander Schreiber
2280 bf6929a2 Alexander Schreiber
2281 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present
  _OP_REQP = ["instance_name"]
  # fine-grained locking is used instead of the big ganeti lock
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance; node locks are declared empty here and
    # recalculated (LOCKS_REPLACE) once the instance lock is held
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # at node level, lock all nodes of the already-locked instance
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    # record the new administrative state before performing the shutdown
    self.cfg.MarkInstanceDown(instance.name)
    # a failed shutdown RPC is only logged; the disks are still torn
    # down below (best-effort semantics)
    if not self.rpc.call_instance_shutdown(node_current, instance):
      logger.Error("could not shutdown instance")

    _ShutdownInstanceDisks(self, instance)
2331 a8083063 Iustin Pop
2332 a8083063 Iustin Pop
2333 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present
  _OP_REQP = ["instance_name"]
  # fine-grained locking is used instead of the big ganeti lock
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance; node locks are declared empty here and
    # recalculated (LOCKS_REPLACE) once the instance lock is held
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # at node level, lock all nodes of the already-locked instance
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node that the instance is really not running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    # os_type is optional on the opcode (None means keep the current OS)
    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: this used to reference self.op.pnode, an attribute the
        # reinstall opcode does not have (_OP_REQP above), so building the
        # error message itself raised AttributeError
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not self.rpc.call_instance_os_add(inst.primary_node, inst,
                                           "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always shut the disks down again, even if the OS install failed
      _ShutdownInstanceDisks(self, inst)
2422 fe7b0351 Michael Hanselmann
2423 fe7b0351 Michael Hanselmann
2424 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present; note this LU does not define
  # ExpandNames/REQ_BGL, so it presumably runs under the big ganeti lock
  # (see the glm comment in Exec) -- TODO confirm against LogicalUnit base
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and is not already taken.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node that the instance is really not running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    # normalize the requested name to its resolved form
    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    # for file-based disks, remember the old storage dir before the rename
    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)

      if not result:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      # a failing rename script is only logged: the config rename above
      # has already happened and is not rolled back
      if not self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                               old_name,
                                               "sda", "sdb"):
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
2530 decd5f45 Iustin Pop
2531 decd5f45 Iustin Pop
2532 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present
  _OP_REQP = ["instance_name", "ignore_failures"]
  # fine-grained locking is used instead of the big ganeti lock
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance; node locks are declared empty here and
    # recalculated (LOCKS_REPLACE) once the instance lock is held
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # at node level, lock all nodes of the already-locked instance
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    The hook node list contains only the master node.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    # shutdown/disk-removal failures abort unless ignore_failures is set,
    # in which case they are reported via feedback and removal continues
    if not self.rpc.call_instance_shutdown(instance.primary_node, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logger.Info("removing block devices for instance %s" % instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
    # drop the now-stale instance lock together with the config entry
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2597 a8083063 Iustin Pop
2598 a8083063 Iustin Pop
2599 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the requested fields and set up locking.

    Locks are only acquired (in shared mode) when at least one of the
    requested output fields is dynamic, i.e. has to be queried live
    from the nodes instead of read from the configuration.

    """
    # fields that need a live query to the instance's primary node
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    # fields answerable purely from the cluster configuration
    self.static_fields = frozenset([
      "name", "os", "pnode", "snodes",
      "admin_state", "admin_ram",
      "disk_template", "ip", "mac", "bridge",
      "sda_size", "sdb_size", "vcpus", "tags",
      "network_port",
      "serial_no", "hypervisor", "hvparams",
      ] + ["hv/%s" % name for name in constants.HVS_PARAMETERS])
    _CheckOutputFields(static=self.static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # queries never need exclusive access
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # lock only when some requested field is not static
    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      # node locks are computed later from the acquired instance locks
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Compute the node locks from the instance locks, if locking at all."""
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.do_locking:
      # the set of instances we actually locked is authoritative
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
    elif self.wanted != locking.ALL_SET:
      instance_names = self.wanted
      # without locks an instance may have vanished since ExpandNames
      missing = set(instance_names).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some instances were removed before retrieving their data: %s"
          % missing)
    else:
      instance_names = all_info.keys()
    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      # at least one dynamic field requested: query the nodes live
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # a literal False marks the node as unreachable
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    output = []
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = (instance.status != "down")
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            # node down: operational state unknown
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin + operational status
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # field[:3] is the disk name ("sda"/"sdb")
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "vcpus":
          val = instance.vcpus
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          # per-parameter hypervisor value ("hv/<param>")
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "hvparams":
          val = i_hv
        elif field == "hypervisor":
          val = instance.hypervisor
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2764 a8083063 Iustin Pop
2765 a8083063 Iustin Pop
2766 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the instance; node locks are computed later."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the primary and secondary nodes of the instance."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # failover only makes sense with network-mirrored disks
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, instance.memory,
                         instance.hypervisor)

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    if not self.rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    # degraded disks on the target would mean losing data on failover
    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.status == "up" and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not self.rpc.call_instance_shutdown(source_node, instance):
      # ignore_consistency implies the source node may be dead; proceed
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status == "up":
      feedback_fn("* activating the instance's disks on target node")
      logger.Info("Starting instance %s on node %s" %
                  (instance.name, target_node))

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        # roll back disk activation before aborting
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      if not self.rpc.call_instance_start(target_node, instance, None):
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s." %
                                 (instance.name, target_node))
2887 a8083063 Iustin Pop
2888 a8083063 Iustin Pop
2889 b9bddb6b Iustin Pop
def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
  """Create a tree of block devices on the primary node.

  This always creates all devices.

  """
  # depth-first recursion: all children must exist before the parent
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
      return False

  lu.cfg.SetDiskID(device, node)
  created_id = lu.rpc.call_blockdev_create(node, device, device.size,
                                           instance.name, True, info)
  if not created_id:
    return False
  # remember the physical id on first successful creation
  if device.physical_id is None:
    device.physical_id = created_id
  return True
2908 a8083063 Iustin Pop
2909 a8083063 Iustin Pop
2910 b9bddb6b Iustin Pop
def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
  """Create a tree of block devices on a secondary node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  """
  # a device requiring secondary creation forces its whole subtree too
  if device.CreateOnSecondary():
    force = True
  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(lu, node, instance,
                                      child, force, info):
      return False

  if not force:
    return True
  lu.cfg.SetDiskID(device, node)
  created_id = lu.rpc.call_blockdev_create(node, device, device.size,
                                           instance.name, False, info)
  if not created_id:
    return False
  # remember the physical id on first successful creation
  if device.physical_id is None:
    device.physical_id = created_id
  return True
2937 a8083063 Iustin Pop
2938 a8083063 Iustin Pop
2939 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
2940 923b1523 Iustin Pop
  """Generate a suitable LV name.
2941 923b1523 Iustin Pop

2942 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2943 923b1523 Iustin Pop

2944 923b1523 Iustin Pop
  """
2945 923b1523 Iustin Pop
  results = []
2946 923b1523 Iustin Pop
  for val in exts:
2947 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
2948 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2949 923b1523 Iustin Pop
  return results
2950 923b1523 Iustin Pop
2951 923b1523 Iustin Pop
2952 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  # data LV plus a fixed-size 128MB metadata LV back the DRBD device
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port,
                                  p_minor, s_minor,
                                  shared_secret),
                      children=[data_lv, meta_lv],
                      iv_name=iv_name)
2971 a1f445d3 Iustin Pop
2972 7c0d6283 Michael Hanselmann
2973 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz,
                          file_storage_dir, file_driver):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()

  if template_name == constants.DT_DISKLESS:
    return []

  if template_name == constants.DT_PLAIN:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".sda", ".sdb"])
    return [
      objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                   logical_id=(vgname, names[0]),
                   iv_name="sda"),
      objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                   logical_id=(vgname, names[1]),
                   iv_name="sdb"),
      ]

  if template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # four minors: sda/sdb on both the primary and the remote node
    (minor_pa, minor_pb,
     minor_sa, minor_sb) = lu.cfg.AllocateDRBDMinor(
      [primary_node, primary_node, remote_node, remote_node], instance_name)

    names = _GenerateUniqueNames(lu, [".sda_data", ".sda_meta",
                                      ".sdb_data", ".sdb_meta"])
    sda_branch = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk_sz, names[0:2], "sda",
                                      minor_pa, minor_sa)
    sdb_branch = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      swap_sz, names[2:4], "sdb",
                                      minor_pb, minor_sb)
    return [sda_branch, sdb_branch]

  if template_name == constants.DT_FILE:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    return [
      objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
                   iv_name="sda", logical_id=(file_driver,
                   "%s/sda" % file_storage_dir)),
      objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
                   iv_name="sdb", logical_id=(file_driver,
                   "%s/sdb" % file_storage_dir)),
      ]

  raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3028 a8083063 Iustin Pop
3029 a8083063 Iustin Pop
3030 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3031 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3032 3ecf6786 Iustin Pop

3033 3ecf6786 Iustin Pop
  """
3034 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3035 a0c3fea1 Michael Hanselmann
3036 a0c3fea1 Michael Hanselmann
3037 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  # file-based instances need their storage directory created up-front
  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(instance.primary_node,
                                                 file_storage_dir)
    if not result:
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
      return False
    if not result[0]:
      logger.Error("failed to create directory '%s'" % file_storage_dir)
      return False

  for device in instance.disks:
    logger.Info("creating volume %s for instance %s" %
                (device.iv_name, instance.name))
    #HARDCODE
    for snode in instance.secondary_nodes:
      created = _CreateBlockDevOnSecondary(lu, snode, instance,
                                           device, False, info)
      if not created:
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (device.iv_name, device, snode))
        return False
    #HARDCODE
    created = _CreateBlockDevOnPrimary(lu, instance.primary_node,
                                       instance, device, info)
    if not created:
      logger.Error("failed to create volume %s on primary!" %
                   device.iv_name)
      return False

  return True
3082 a8083063 Iustin Pop
3083 a8083063 Iustin Pop
3084 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_removed = True
  for device in instance.disks:
    # walk the whole device tree on every node the disk lives on
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      if not lu.rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (device.iv_name, node))
        all_removed = False

  # file-based instances also need their storage directory removed
  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if not lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                               file_storage_dir):
      logger.Error("could not remove directory '%s'" % file_storage_dir)
      all_removed = False

  return all_removed
3119 a8083063 Iustin Pop
3120 a8083063 Iustin Pop
3121 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
  """Compute how much volume-group space an instance will need.

  This is currently hard-coded for the two-drive (data + swap) layout.
  Templates that do not consume LVM space yield None.

  """
  base = disk_size + swap_size
  if disk_template == constants.DT_DISKLESS:
    # no block devices at all
    return None
  elif disk_template == constants.DT_FILE:
    # file-backed storage lives outside the volume group
    return None
  elif disk_template == constants.DT_PLAIN:
    return base
  elif disk_template == constants.DT_DRBD8:
    # 256 MB are added for drbd metadata, 128MB for each drbd device
    return base + 256
  raise errors.ProgrammerError("Disk template '%s' size requirement"
                               " is unknown" %  disk_template)
3141 e2fe6369 Iustin Pop
3142 e2fe6369 Iustin Pop
3143 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
3144 74409b12 Iustin Pop
  """Hypervisor parameter validation.
3145 74409b12 Iustin Pop

3146 74409b12 Iustin Pop
  This function abstract the hypervisor parameter validation to be
3147 74409b12 Iustin Pop
  used in both instance create and instance modify.
3148 74409b12 Iustin Pop

3149 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
3150 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
3151 74409b12 Iustin Pop
  @type nodenames: list
3152 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
3153 74409b12 Iustin Pop
  @type hvname: string
3154 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
3155 74409b12 Iustin Pop
  @type hvparams: dict
3156 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
3157 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
3158 74409b12 Iustin Pop

3159 74409b12 Iustin Pop
  """
3160 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
3161 74409b12 Iustin Pop
                                                  hvname,
3162 74409b12 Iustin Pop
                                                  hvparams)
3163 74409b12 Iustin Pop
  for node in nodenames:
3164 74409b12 Iustin Pop
    info = hvinfo.get(node, None)
3165 74409b12 Iustin Pop
    if not info or not isinstance(info, (tuple, list)):
3166 74409b12 Iustin Pop
      raise errors.OpPrereqError("Cannot get current information"
3167 74409b12 Iustin Pop
                                 " from node '%s' (%s)" % (node, info))
3168 74409b12 Iustin Pop
    if not info[0]:
3169 74409b12 Iustin Pop
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
3170 74409b12 Iustin Pop
                                 " %s" % info[1])
3171 74409b12 Iustin Pop
3172 74409b12 Iustin Pop
3173 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Supports two modes (self.op.mode): INSTANCE_CREATE, which builds a
  brand-new instance, and INSTANCE_IMPORT, which restores one from an
  export directory on a source node.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present before processing starts
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check", "mac", "hvparams"]
  REQ_BGL = False  # we declare our own locks in ExpandNames

  def _ExpandNode(self, node):
    """Expands and checks one node name.

    @param node: the (possibly short) node name to expand
    @return: the full node name
    @raise errors.OpPrereqError: if the name cannot be expanded

    """
    node_full = self.cfg.ExpandNodeName(node)
    if node_full is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return node_full

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation: validates the
    cheap (local) parts of the opcode, reserves the new instance name
    as a lock and declares the node locks we will need.

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # default to the cluster-wide hypervisor when none was requested
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)

    # merge the requested hvparams over the cluster defaults before
    # asking the hypervisor class to validate the syntax
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # ip validity checks: "none" disables the IP, "auto" resolves the
    # instance name, anything else must be a literal IP address
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = self.op.ip = inst_ip
    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # MAC address verification
    if self.op.mac != "auto":
      if not utils.IsValidMac(self.op.mac.lower()):
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
                                   self.op.mac)

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    # the per-instance dir must be relative to the cluster storage root
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator may pick any node, so lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      self.op.src_node = src_node = self._ExpandNode(src_node)
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        self.needed_locks[locking.LEVEL_NODE].append(src_node)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    On success this fills in self.op.pnode (and self.op.snode when the
    allocator returned two nodes).

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    disks = [{"size": self.op.disk_size, "mode": "w"},
             {"size": self.op.swap_size, "mode": "w"}]
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
             "bridge": self.op.bridge}]
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.op.vcpus,
                     mem_size=self.op.mem_size,
                     disks=disks,
                     nics=nics,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.pnode = ial.nodes[0]
    logger.ToStdout("Selected nodes for the instance: %s" %
                    (", ".join(ial.nodes),))
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
                (self.op.instance_name, self.op.iallocator, ial.nodes))
    if ial.required_nodes == 2:
      self.op.snode = ial.nodes[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    @return: a (env, pre-nodes, post-nodes) tuple; both node lists are
        the master plus the primary and secondary nodes

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
    ))

    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl


  def CheckPrereq(self):
    """Check prerequisites.

    Verifies the export (in import mode), the IP availability, the
    target nodes (allocator run, disk space, hypervisor parameters,
    OS, bridge and free memory).

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")


    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      # export_info is a ConfigParser-style object describing the export
      export_info = self.rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage

    # ip ping checks (we use the same ip that was resolved in ExpandNames)

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # a reachable IP means something already uses the address
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    # bridge verification: fall back to the cluster default bridge
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    # None means the template needs no volume-group space
    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.op.disk_size, self.op.swap_size)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo.get(node, None)
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    # remote (per-node) hypervisor parameter validation
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    if not self.rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.op.mem_size, self.op.hypervisor)

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the disks, registers the instance in the configuration,
    waits for disk sync, installs/imports the OS and optionally starts
    the instance; on disk failure the partial creation is rolled back.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    # some hypervisors need a network port (e.g. for a console)
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size,
                                  file_storage_dir,
                                  self.op.file_driver)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self, iobj):
      # roll back whatever was created before giving up
      _RemoveDisks(self, iobj)
      self.cfg.ReleaseDRBDMinors(instance)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Remove the temp. assignements for the instance's drbds
    self.cfg.ReleaseDRBDMinors(instance)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo the creation and drop the config entry
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not self.rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        cluster_name = self.cfg.GetClusterName()
        if not self.rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image,
                                                cluster_name):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not self.rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3635 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  # REQ_BGL = False: presumably opts out of the big ganeti lock in favour
  # of the per-instance lock taken below -- verify against LogicalUnit
  REQ_BGL = False

  def ExpandNames(self):
    # only the instance itself needs to be locked
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    Returns the ssh command line that, when run on the master node,
    attaches to the instance's console on its primary node.

    """
    instance = self.instance
    node = instance.primary_node

    # query only the primary node, restricted to this instance's hypervisor
    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    if node_insts is False:
      # a False per-node result is treated as "could not talk to the node"
      raise errors.OpExecError("Can't connect to node %s." % node)

    if instance.name not in node_insts:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logger.Debug("connecting to console of %s on %s" % (instance.name, node))

    # the hypervisor knows how to open a console for its own instances
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    console_cmd = hyper.GetShellCommandForConsole(instance)

    # build ssh cmdline
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
3681 a8083063 Iustin Pop
3682 a8083063 Iustin Pop
3683 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3684 a8083063 Iustin Pop
  """Replace the disks of an instance.
3685 a8083063 Iustin Pop

3686 a8083063 Iustin Pop
  """
3687 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3688 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3689 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3690 efd990e4 Guido Trotter
  REQ_BGL = False
3691 efd990e4 Guido Trotter
3692 efd990e4 Guido Trotter
  def ExpandNames(self):
    """Expand and lock the instance plus the nodes needed for replacement."""
    self._ExpandAndLockInstance()

    # older opcodes may not carry the attribute at all; normalize to None
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None

    iallocator_name = getattr(self.op, "iallocator", None)
    if iallocator_name is not None:
      # an allocator will pick the new secondary; it is mutually
      # exclusive with an explicit node, and since the target is not
      # known yet we must lock all nodes
      if self.op.remote_node is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      expanded_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if expanded_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = expanded_node
      self.needed_locks[locking.LEVEL_NODE] = [expanded_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      # no explicit secondary: the instance's own nodes get locked later
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3715 efd990e4 Guido Trotter
3716 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Declare the instance's node locks, unless all nodes are locked."""
    if level != locking.LEVEL_NODE:
      return
    # ALL_SET is a sentinel object, hence the identity comparison
    if self.needed_locks[locking.LEVEL_NODE] is locking.ALL_SET:
      return
    # not locking everything: declare the instance's primary/secondary nodes
    self._LockInstancesNodes()
3722 a8083063 Iustin Pop
3723 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    Runs the relocation mode of the configured iallocator for this
    instance and, on success, stores the chosen node in
    self.op.remote_node.

    Raises:
      errors.OpPrereqError: if the allocator run fails or returns an
        unexpected number of nodes

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # FIX: the format string has three placeholders but only two
      # arguments were previously supplied, so raising this error died
      # with a TypeError instead of the intended OpPrereqError
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    logger.ToStdout("Selected new secondary for the instance: %s" %
                    self.op.remote_node)
3745 b6e82a65 Iustin Pop
3746 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    # replacement-specific variables first; the generic per-instance
    # variables are merged afterwards (and thus win on key collision,
    # matching plain dict.update semantics)
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # hooks run on the master and the primary, plus the new secondary
    # when one was explicitly given
    run_nodes = [self.cfg.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      run_nodes.append(self.op.remote_node)
    return env, run_nodes, run_nodes
3765 a8083063 Iustin Pop
3766 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    Also validates/normalizes the replacement mode and the target
    secondary, and precomputes self.sec_node, self.remote_node_info,
    self.tgt_node/self.oth_node (and self.new_node in secondary mode).

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # disk replacement only makes sense for network-mirrored templates
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # an iallocator, if given, picks the new secondary for us (it sets
    # self.op.remote_node); must run before the remote_node checks below
    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      # note: checked only after the possible mode switch above
      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # tgt_node: where new storage is created; oth_node: the peer
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # finally, every requested disk must actually exist on the instance
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
3837 a8083063 Iustin Pop
3838 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # iv_name -> (drbd device, its old LV children, the new LVs)
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node/oth_node were computed in CheckPrereq based on the mode
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not self.rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      # one data LV plus one (fixed 128m) metadata LV per replaced disk
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      # maps an LV to its "retired" name, keeping the same VG
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # keep the config objects in sync with the on-node renames
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        for new_lv in new_lvs:
          if not self.rpc.call_blockdev_remove(tgt_node, new_lv):
            # NOTE(review): the "%s" below is never filled in, so it shows
            # up literally in the message -- confirm LogWarning's signature
            # before passing the device as an extra argument
            warning("Can't rollback device %s", hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of the blockdev_find result is the degraded flag
      is_degr = self.rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # best-effort removal: warn and go on, don't abort the replace
        if not self.rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
4007 a9e0c397 Iustin Pop
4008 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
4009 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
4010 a9e0c397 Iustin Pop

4011 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4012 a9e0c397 Iustin Pop
      - for all disks of the instance:
4013 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
4014 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
4015 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
4016 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
4017 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
4018 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
4019 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
4020 a9e0c397 Iustin Pop
          not network enabled
4021 a9e0c397 Iustin Pop
      - wait for sync across all devices
4022 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
4023 a9e0c397 Iustin Pop

4024 a9e0c397 Iustin Pop
    Failures are not very well handled.
4025 0834c866 Iustin Pop

4026 a9e0c397 Iustin Pop
    """
4027 0834c866 Iustin Pop
    steps_total = 6
4028 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4029 a9e0c397 Iustin Pop
    instance = self.instance
4030 a9e0c397 Iustin Pop
    iv_names = {}
4031 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4032 a9e0c397 Iustin Pop
    # start of work
4033 a9e0c397 Iustin Pop
    cfg = self.cfg
4034 a9e0c397 Iustin Pop
    old_node = self.tgt_node
4035 a9e0c397 Iustin Pop
    new_node = self.new_node
4036 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
4037 0834c866 Iustin Pop
4038 0834c866 Iustin Pop
    # Step: check device activation
4039 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4040 0834c866 Iustin Pop
    info("checking volume groups")
4041 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
4042 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
4043 0834c866 Iustin Pop
    if not results:
4044 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4045 0834c866 Iustin Pop
    for node in pri_node, new_node:
4046 0834c866 Iustin Pop
      res = results.get(node, False)
4047 0834c866 Iustin Pop
      if not res or my_vg not in res:
4048 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4049 0834c866 Iustin Pop
                                 (my_vg, node))
4050 0834c866 Iustin Pop
    for dev in instance.disks:
4051 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4052 0834c866 Iustin Pop
        continue
4053 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
4054 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4055 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4056 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
4057 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
4058 0834c866 Iustin Pop
4059 0834c866 Iustin Pop
    # Step: check other node consistency
4060 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4061 0834c866 Iustin Pop
    for dev in instance.disks:
4062 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4063 0834c866 Iustin Pop
        continue
4064 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
4065 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4066 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4067 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4068 0834c866 Iustin Pop
                                 pri_node)
4069 0834c866 Iustin Pop
4070 0834c866 Iustin Pop
    # Step: create new storage
4071 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4072 468b46f9 Iustin Pop
    for dev in instance.disks:
4073 a9e0c397 Iustin Pop
      size = dev.size
4074 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
4075 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4076 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4077 a9e0c397 Iustin Pop
      # are talking about the secondary node
4078 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4079 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
4080 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4081 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4082 a9e0c397 Iustin Pop
                                   " node '%s'" %
4083 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4084 a9e0c397 Iustin Pop
4085 0834c866 Iustin Pop
4086 468b46f9 Iustin Pop
    # Step 4: dbrd minors and drbd setups changes
4087 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
4088 a1578d63 Iustin Pop
    # error and the success paths
4089 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4090 a1578d63 Iustin Pop
                                   instance.name)
4091 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
4092 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4093 468b46f9 Iustin Pop
    for dev, new_minor in zip(instance.disks, minors):
4094 0834c866 Iustin Pop
      size = dev.size
4095 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
4096 a9e0c397 Iustin Pop
      # create new devices on new_node
4097 ffa1c0dc Iustin Pop
      if pri_node == dev.logical_id[0]:
4098 ffa1c0dc Iustin Pop
        new_logical_id = (pri_node, new_node,
4099 f9518d38 Iustin Pop
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4100 f9518d38 Iustin Pop
                          dev.logical_id[5])
4101 ffa1c0dc Iustin Pop
      else:
4102 ffa1c0dc Iustin Pop
        new_logical_id = (new_node, pri_node,
4103 f9518d38 Iustin Pop
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4104 f9518d38 Iustin Pop
                          dev.logical_id[5])
4105 468b46f9 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children, new_logical_id)
4106 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4107 a1578d63 Iustin Pop
                    new_logical_id)
4108 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4109 ffa1c0dc Iustin Pop
                              logical_id=new_logical_id,
4110 a9e0c397 Iustin Pop
                              children=dev.children)
4111 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(self, new_node, instance,
4112 3f78eef2 Iustin Pop
                                        new_drbd, False,
4113 b9bddb6b Iustin Pop
                                        _GetInstanceInfoText(instance)):
4114 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4115 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4116 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4117 a9e0c397 Iustin Pop
4118 0834c866 Iustin Pop
    for dev in instance.disks:
4119 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4120 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
4121 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4122 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_shutdown(old_node, dev):
4123 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
4124 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4125 a9e0c397 Iustin Pop
4126 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4127 642445d9 Iustin Pop
    done = 0
4128 642445d9 Iustin Pop
    for dev in instance.disks:
4129 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4130 f9518d38 Iustin Pop
      # set the network part of the physical (unique in bdev terms) id
4131 f9518d38 Iustin Pop
      # to None, meaning detach from network
4132 f9518d38 Iustin Pop
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
4133 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4134 642445d9 Iustin Pop
      # standalone state
4135 72737a7f Iustin Pop
      if self.rpc.call_blockdev_find(pri_node, dev):
4136 642445d9 Iustin Pop
        done += 1
4137 642445d9 Iustin Pop
      else:
4138 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
4139 642445d9 Iustin Pop
                dev.iv_name)
4140 642445d9 Iustin Pop
4141 642445d9 Iustin Pop
    if not done:
4142 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4143 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
4144 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4145 642445d9 Iustin Pop
4146 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4147 642445d9 Iustin Pop
    # the instance to point to the new secondary
4148 642445d9 Iustin Pop
    info("updating instance configuration")
4149 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
4150 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
4151 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4152 642445d9 Iustin Pop
    cfg.Update(instance)
4153 a1578d63 Iustin Pop
    # we can remove now the temp minors as now the new values are
4154 a1578d63 Iustin Pop
    # written to the config file (and therefore stable)
4155 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance.name)
4156 a9e0c397 Iustin Pop
4157 642445d9 Iustin Pop
    # and now perform the drbd attach
4158 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4159 642445d9 Iustin Pop
    failures = []
4160 642445d9 Iustin Pop
    for dev in instance.disks:
4161 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
4162 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4163 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4164 642445d9 Iustin Pop
      # is correct
4165 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4166 ffa1c0dc Iustin Pop
      logging.debug("Disk to attach: %s", dev)
4167 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4168 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
4169 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4170 a9e0c397 Iustin Pop
4171 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4172 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4173 a9e0c397 Iustin Pop
    # return value
4174 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4175 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4176 a9e0c397 Iustin Pop
4177 a9e0c397 Iustin Pop
    # so check manually all the devices
4178 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4179 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4180 72737a7f Iustin Pop
      is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5]
4181 a9e0c397 Iustin Pop
      if is_degr:
4182 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4183 a9e0c397 Iustin Pop
4184 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4185 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4186 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
4187 a9e0c397 Iustin Pop
      for lv in old_lvs:
4188 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4189 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_remove(old_node, lv):
4190 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4191 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4192 a9e0c397 Iustin Pop
4193 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    Chooses the concrete DRBD8 replacement routine (disk-only versus
    secondary-node change) and runs it, temporarily activating the
    disks of a down instance for the duration of the operation.

    """
    inst = self.instance
    was_down = inst.status == "down"

    # A down instance has no active disks; bring them up so the
    # replacement can work on them.
    if was_down:
      _StartInstanceDisks(self, inst, True)

    if inst.disk_template != constants.DT_DRBD8:
      raise errors.ProgrammerError("Unhandled disk replacement case")

    # No remote node given means we replace only the local storage;
    # otherwise we move the secondary to the given node.
    if self.op.remote_node is None:
      handler = self._ExecD8DiskOnly
    else:
      handler = self._ExecD8Secondary

    result = handler(feedback_fn)

    # Restore the original (down) state of the instance's disks.
    if was_down:
      _SafeShutdownInstanceDisks(self, inst)

    return result
4220 a9e0c397 Iustin Pop
4221 a8083063 Iustin Pop
4222 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters
  _OP_REQP = ["instance_name", "disk", "amount"]
  REQ_BGL = False

  def ExpandNames(self):
    # Lock the instance now; its node locks are filled in later by the
    # LOCKS_REPLACE recalculation in DeclareLocks.
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # Acquire locks on all nodes of the already-locked instance.
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.instance = instance

    # only LVM-backed disk templates can be grown
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    if instance.FindDisk(self.op.disk) is None:
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                 (self.op.disk, instance.name))

    # every node holding a copy of the disk needs enough free VG space
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.get('vg_free', None)
      if not isinstance(vg_free, int):
        # a missing or malformed value means we cannot do a safe check
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, info['vg_free'], self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = instance.FindDisk(self.op.disk)
    # grow the device on all nodes: secondaries first, primary last
    for node in (instance.secondary_nodes + (instance.primary_node,)):
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      # the RPC is expected to return a (success, payload) pair
      if (not result or not isinstance(result, (list, tuple)) or
          len(result) != 2):
        raise errors.OpExecError("grow request failed to node %s" % node)
      elif not result[0]:
        raise errors.OpExecError("grow request failed to node %s: %s" %
                                 (node, result[1]))
    # record the new size in the configuration only after all nodes succeeded
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    return
4312 8729e0d7 Iustin Pop
4313 8729e0d7 Iustin Pop
4314 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  # required opcode parameters; "static" skips live (RPC) queries
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # read-only operation: all locks are taken in shared mode
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      # expand each requested name and lock only those instances
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" %
                                     self.op.instance_name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # no explicit list: query (and lock) every instance
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" case: the acquired locks are the instance list
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Recursively builds a dict describing ``dev`` and its children,
    optionally querying the primary/secondary nodes via RPC unless the
    opcode requested static (config-only) data.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      # recurse into child devices (e.g. the LVs backing a DRBD device)
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      if not self.op.static:
        # live query: ask the primary node whether the instance is running
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "vcpus": instance.vcpus,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": self.cfg.GetClusterInfo().FillHV(instance),
        }

      result[instance.name] = idict

    return result
4445 a8083063 Iustin Pop
4446 a8083063 Iustin Pop
4447 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
  """Modifies an instances's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters; all others (mem, vcpus, ip, ...) are optional
  _OP_REQP = ["instance_name", "hvparams"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    if self.do_ip or self.do_bridge or self.mac:
      # a NIC change: fill the unchanged fields from the current NIC
      if self.do_ip:
        ip = self.ip
      else:
        ip = self.instance.nics[0].ip
      if self.bridge:
        bridge = self.bridge
      else:
        bridge = self.instance.nics[0].bridge
      if self.mac:
        mac = self.mac
      else:
        mac = self.instance.nics[0].mac
      args['nics'] = [(ip, bridge, mac)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    nl = [self.cfg.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
    # a separate CheckArguments function, if we implement one, so the operation
    # can be aborted without waiting for any lock, should it have an error...
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.mac = getattr(self.op, "mac", None)
    self.bridge = getattr(self.op, "bridge", None)
    self.kernel_path = getattr(self.op, "kernel_path", None)
    self.initrd_path = getattr(self.op, "initrd_path", None)
    self.force = getattr(self.op, "force", None)
    # at least one change must be requested (either a plain parameter
    # or a hypervisor parameter)
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac]
    if all_parms.count(None) == len(all_parms) and not self.op.hvparams:
      raise errors.OpPrereqError("No changes submitted")
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      # the literal string "none" means "remove the IP"
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)
    if self.mac is not None:
      if self.cfg.IsMacInUse(self.mac):
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
                                   self.mac)
      if not utils.IsValidMac(self.mac):
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)

    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = self.instance.primary_node
    nodelist = [pnode]
    nodelist.extend(instance.secondary_nodes)

    if self.op.hvparams:
      # build the new hvparams dict: None values delete a key, others
      # override; work on a deep copy so the config is untouched on error
      i_hvdict = copy.deepcopy(instance.hvparams)
      for key, val in self.op.hvparams.iteritems():
        if val is None:
          try:
            del i_hvdict[key]
          except KeyError:
            pass
        else:
          i_hvdict[key] = val
      cluster = self.cfg.GetClusterInfo()
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
                                i_hvdict)
      # local check
      hypervisor.GetHypervisor(
        instance.hypervisor).CheckParameterSyntax(hv_new)
      # remote (per-node) check
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      # saved for Exec, which applies it to the instance
      self.hv_new = hv_new

    # non-fatal problems are collected here and reported by Exec
    self.warn = []
    if self.mem is not None and not self.force:
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
                                         instance.hypervisor)

      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s" % pnode)
      else:
        if instance_info:
          current_mem = instance_info['memory']
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        # memory still missing after reclaiming what the instance uses now
        miss_mem = self.mem - current_mem - nodeinfo[pnode]['memory_free']
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      # secondary-node shortfalls are only warnings (failover may still
      # be possible later if memory is freed)
      for node in instance.secondary_nodes:
        if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
          self.warn.append("Can't get info from secondary node %s" % node)
        elif self.mem > nodeinfo[node]['memory_free']:
          self.warn.append("Not enough memory to failover instance to"
                           " secondary node %s" % node)

    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.
    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    # list of (name, new_value) pairs describing what was changed
    result = []
    instance = self.instance
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus",  self.vcpus))
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))
    if self.mac:
      instance.nics[0].mac = self.mac
      result.append(("mac", self.mac))
    if self.op.hvparams:
      # hv_new was validated and filled in CheckPrereq
      instance.hvparams = self.hv_new
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    self.cfg.Update(instance)

    return result
4639 a8083063 Iustin Pop
4640 a8083063 Iustin Pop
4641 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the node locks needed for listing exports.

    Without an explicit node list we share-lock every node in the
    cluster, otherwise only the requested (expanded) nodes.

    """
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the nodes to query are exactly the ones we managed to lock
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return self.rpc.call_export_list(self.nodes)
4673 a8083063 Iustin Pop
4674 a8083063 Iustin Pop
4675 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sadly we have to lock all nodes for now, as we don't know where the
    # previous export might be; this LU searches for it and removes it
    # from its current node. In the future this could be fixed by:
    #  - making a tasklet to search (share-lock all), then create the new
    #    one, then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    assert self.dst_node is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

    # file-based disks cannot be snapshotted, hence cannot be exported
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node

    if self.op.shutdown:
      # shutdown the instance, but not the disks
      if not self.rpc.call_instance_shutdown(src_node, instance):
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()
    snap_disks = []

    try:
      for disk in instance.disks:
        if disk.iv_name != "sda":
          continue
        # snap_name will be a snapshot of an lvm leaf of the one we passed
        snap_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if snap_name:
          snap_disks.append(objects.Disk(dev_type=constants.LD_LV,
                                         size=disk.size,
                                         logical_id=(vgname, snap_name),
                                         physical_id=(vgname, snap_name),
                                         iv_name=disk.iv_name))
        else:
          logger.Error("could not snapshot block device %s on node %s" %
                       (disk.logical_id[1], src_node))

    finally:
      # restart the instance if we shut it down above and it is supposed
      # to be running; on failure also shut its disks down again
      if self.op.shutdown and instance.status == "up":
        if not self.rpc.call_instance_start(src_node, instance, None):
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for dev in snap_disks:
      # copy each snapshot to the destination, then drop it from the
      # source node; failures are logged but do not abort the export
      if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                      instance, cluster_name):
        logger.Error("could not export block device %s from node %s to node %s"
                     % (dev.logical_id[1], src_node, dst_node.name))
      if not self.rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from node %s" %
                     (dev.logical_id[1], src_node))

    if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not self.rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4806 5c947f38 Iustin Pop
4807 5c947f38 Iustin Pop
4808 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # All nodes must be locked for RemoveExport to work, but the instance
    # itself needs no lock: nothing happens to it, and exports may also
    # be removed for instances that no longer exist.
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      if instance_name not in exportlist[node]:
        continue
      found = True
      if not self.rpc.call_export_remove(node, instance_name):
        logger.Error("could not remove export for instance %s"
                     " on node %s" % (instance_name, node))

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
4853 9ac99fda Guido Trotter
4854 9ac99fda Guido Trotter
4855 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      name = self.cfg.ExpandNodeName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      # store back the expanded name and lock just that node
      self.op.name = name
      self.needed_locks[locking.LEVEL_NODE] = name
    elif self.op.kind == constants.TAG_INSTANCE:
      name = self.cfg.ExpandInstanceName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      # store back the expanded name and lock just that instance
      self.op.name = name
      self.needed_locks[locking.LEVEL_INSTANCE] = name

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # resolve the target object whose tags will be read/modified
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4892 5c947f38 Iustin Pop
4893 5c947f38 Iustin Pop
4894 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq
    return list(self.target.GetTags())
4906 5c947f38 Iustin Pop
4907 5c947f38 Iustin Pop
4908 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4909 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4910 73415719 Iustin Pop

4911 73415719 Iustin Pop
  """
4912 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4913 8646adce Guido Trotter
  REQ_BGL = False
4914 8646adce Guido Trotter
4915 8646adce Guido Trotter
  def ExpandNames(self):
4916 8646adce Guido Trotter
    self.needed_locks = {}
4917 73415719 Iustin Pop
4918 73415719 Iustin Pop
  def CheckPrereq(self):
4919 73415719 Iustin Pop
    """Check prerequisites.
4920 73415719 Iustin Pop

4921 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4922 73415719 Iustin Pop

4923 73415719 Iustin Pop
    """
4924 73415719 Iustin Pop
    try:
4925 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4926 73415719 Iustin Pop
    except re.error, err:
4927 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4928 73415719 Iustin Pop
                                 (self.op.pattern, err))
4929 73415719 Iustin Pop
4930 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4931 73415719 Iustin Pop
    """Returns the tag list.
4932 73415719 Iustin Pop

4933 73415719 Iustin Pop
    """
4934 73415719 Iustin Pop
    cfg = self.cfg
4935 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4936 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
4937 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4938 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
4939 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4940 73415719 Iustin Pop
    results = []
4941 73415719 Iustin Pop
    for path, target in tgts:
4942 73415719 Iustin Pop
      for tag in target.GetTags():
4943 73415719 Iustin Pop
        if self.re.search(tag):
4944 73415719 Iustin Pop
          results.append((path, tag))
4945 73415719 Iustin Pop
    return results
4946 73415719 Iustin Pop
4947 73415719 Iustin Pop
4948 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4949 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4950 5c947f38 Iustin Pop

4951 5c947f38 Iustin Pop
  """
4952 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4953 8646adce Guido Trotter
  REQ_BGL = False
4954 5c947f38 Iustin Pop
4955 5c947f38 Iustin Pop
  def CheckPrereq(self):
4956 5c947f38 Iustin Pop
    """Check prerequisites.
4957 5c947f38 Iustin Pop

4958 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4959 5c947f38 Iustin Pop

4960 5c947f38 Iustin Pop
    """
4961 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4962 f27302fa Iustin Pop
    for tag in self.op.tags:
4963 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4964 5c947f38 Iustin Pop
4965 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4966 5c947f38 Iustin Pop
    """Sets the tag.
4967 5c947f38 Iustin Pop

4968 5c947f38 Iustin Pop
    """
4969 5c947f38 Iustin Pop
    try:
4970 f27302fa Iustin Pop
      for tag in self.op.tags:
4971 f27302fa Iustin Pop
        self.target.AddTag(tag)
4972 5c947f38 Iustin Pop
    except errors.TagError, err:
4973 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4974 5c947f38 Iustin Pop
    try:
4975 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4976 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4977 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4978 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4979 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4980 5c947f38 Iustin Pop
4981 5c947f38 Iustin Pop
4982 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    # every tag to be removed must currently be present on the target
    if not del_tags <= cur_tags:
      diff_names = sorted(["'%s'" % tag for tag in del_tags - cur_tags])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    # persist the change; a concurrent config modification means the
    # whole operation must be retried by the caller
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
5019 06009e27 Iustin Pop
5020 0eed6e61 Guido Trotter
5021 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    # sleep locally on the master, if requested
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    # then fan the sleep out to the requested nodes via rpc
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        if not node_result:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result))
5065 d61df03e Iustin Pop
5066 d61df03e Iustin Pop
5067 d1c2dd75 Iustin Pop
class IAllocator(object):
5068 d1c2dd75 Iustin Pop
  """IAllocator framework.
5069 d61df03e Iustin Pop

5070 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
5071 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
5072 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
5073 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
5074 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
5075 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
5076 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
5077 d1c2dd75 Iustin Pop
      easy usage
5078 d61df03e Iustin Pop

5079 d61df03e Iustin Pop
  """
5080 29859cb7 Iustin Pop
  _ALLO_KEYS = [
5081 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
5082 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
5083 d1c2dd75 Iustin Pop
    ]
5084 29859cb7 Iustin Pop
  _RELO_KEYS = [
5085 29859cb7 Iustin Pop
    "relocate_from",
5086 29859cb7 Iustin Pop
    ]
5087 d1c2dd75 Iustin Pop
5088 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
    self.lu = lu
    # buffer variables: the input/output for the external script, both
    # in text and in structured form
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # pre-declare all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # result fields, filled in after the external script has run
    self.success = self.info = self.nodes = None
    # select the set of keys required by the requested mode
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      required_keys = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      required_keys = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # every keyword argument must be a known key...
    for key in kwargs:
      if key not in required_keys:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    # ...and every required key must have been supplied
    for key in required_keys:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()
5119 d1c2dd75 Iustin Pop
5120 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
5121 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
5122 d1c2dd75 Iustin Pop

5123 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
5124 d1c2dd75 Iustin Pop

5125 d1c2dd75 Iustin Pop
    """
5126 72737a7f Iustin Pop
    cfg = self.lu.cfg
5127 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
5128 d1c2dd75 Iustin Pop
    # cluster data
5129 d1c2dd75 Iustin Pop
    data = {
5130 d1c2dd75 Iustin Pop
      "version": 1,
5131 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
5132 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
5133 e69d05fd Iustin Pop
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
5134 d1c2dd75 Iustin Pop
      # we don't have job IDs
5135 d61df03e Iustin Pop
      }
5136 d61df03e Iustin Pop
5137 6286519f Iustin Pop
    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]
5138 6286519f Iustin Pop
5139 d1c2dd75 Iustin Pop
    # node data
5140 d1c2dd75 Iustin Pop
    node_results = {}
5141 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
5142 e69d05fd Iustin Pop
    # FIXME: here we have only one hypervisor information, but
5143 e69d05fd Iustin Pop
    # instance can belong to different hypervisors
5144 72737a7f Iustin Pop
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
5145 72737a7f Iustin Pop
                                           cfg.GetHypervisorType())
5146 d1c2dd75 Iustin Pop
    for nname in node_list:
5147 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
5148 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
5149 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
5150 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
5151 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
5152 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
5153 d1c2dd75 Iustin Pop
        if attr not in remote_info:
5154 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
5155 d1c2dd75 Iustin Pop
                                   (nname, attr))
5156 d1c2dd75 Iustin Pop
        try:
5157 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
5158 d1c2dd75 Iustin Pop
        except ValueError, err:
5159 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
5160 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
5161 6286519f Iustin Pop
      # compute memory used by primary instances
5162 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
5163 6286519f Iustin Pop
      for iinfo in i_list:
5164 6286519f Iustin Pop
        if iinfo.primary_node == nname:
5165 6286519f Iustin Pop
          i_p_mem += iinfo.memory
5166 6286519f Iustin Pop
          if iinfo.status == "up":
5167 6286519f Iustin Pop
            i_p_up_mem += iinfo.memory
5168 6286519f Iustin Pop
5169 b2662e7f Iustin Pop
      # compute memory used by instances
5170 d1c2dd75 Iustin Pop
      pnr = {
5171 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
5172 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
5173 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
5174 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
5175 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
5176 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
5177 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
5178 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
5179 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
5180 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
5181 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
5182 d1c2dd75 Iustin Pop
        }
5183 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
5184 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
5185 d1c2dd75 Iustin Pop
5186 d1c2dd75 Iustin Pop
    # instance data
5187 d1c2dd75 Iustin Pop
    instance_data = {}
5188 6286519f Iustin Pop
    for iinfo in i_list:
5189 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
5190 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
5191 d1c2dd75 Iustin Pop
      pir = {
5192 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
5193 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
5194 d1c2dd75 Iustin Pop
        "vcpus": iinfo.vcpus,
5195 d1c2dd75 Iustin Pop
        "memory": iinfo.memory,
5196 d1c2dd75 Iustin Pop
        "os": iinfo.os,
5197 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
5198 d1c2dd75 Iustin Pop
        "nics": nic_data,
5199 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
5200 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
5201 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
5202 d1c2dd75 Iustin Pop
        }
5203 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
5204 d61df03e Iustin Pop
5205 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
5206 d61df03e Iustin Pop
5207 d1c2dd75 Iustin Pop
    self.in_data = data
5208 d61df03e Iustin Pop
5209 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
    """Fill in the allocation request for a new instance.

    Together with _ComputeClusterData this builds the complete input
    structure needed by the allocator script.

    The checks for the completeness of the opcode must have already been
    done.

    """
    if len(self.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    disk_space = _ComputeDiskSize(self.disk_template,
                                  self.disks[0]["size"], self.disks[1]["size"])

    # network-mirrored templates need a secondary node too
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1

    self.in_data["request"] = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
5245 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # only network-mirrored instances can have their secondary replaced
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    # mirror the two-disk restriction enforced in _AddNewInstance instead
    # of failing with an IndexError in the size computation below
    if len(instance.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    self.required_nodes = 1

    disk_space = _ComputeDiskSize(instance.disk_template,
                                  instance.disks[0].size,
                                  instance.disks[1].size)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request
5281 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
    """Assemble and serialize the complete allocator input.

    Gathers the cluster-wide data, adds the mode-specific request and
    stores the serialized result in self.in_text.

    """
    self._ComputeClusterData()

    # pick the request builder matching the requested mode
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      fill_request = self._AddNewInstance
    else:
      fill_request = self._AddRelocateInstance
    fill_request()

    self.in_text = serializer.Dump(self.in_data)
5294 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    Invokes the iallocator script 'name' on the master node via RPC
    (or via 'call_fn', mainly useful for tests), feeding it the
    previously built self.in_text; the script's stdout is saved in
    self.out_text and, when 'validate' is true, parsed and checked.

    Raises OpExecError if the runner returns a malformed result, the
    allocator script cannot be found, or the script itself fails.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)

    # the runner must return (return-code, stdout, stderr, fail-reason)
    if not isinstance(result, (list, tuple)) or len(result) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()
5318 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5319 d1c2dd75 Iustin Pop
    """Process the allocator results.
5320 538475ca Iustin Pop

5321 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5322 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5323 538475ca Iustin Pop

5324 d1c2dd75 Iustin Pop
    """
5325 d1c2dd75 Iustin Pop
    try:
5326 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5327 d1c2dd75 Iustin Pop
    except Exception, err:
5328 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5329 d1c2dd75 Iustin Pop
5330 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5331 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5332 538475ca Iustin Pop
5333 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5334 d1c2dd75 Iustin Pop
      if key not in rdict:
5335 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5336 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5337 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5338 538475ca Iustin Pop
5339 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5340 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5341 d1c2dd75 Iustin Pop
                               " is not a list")
5342 d1c2dd75 Iustin Pop
    self.out_data = rdict
5343 538475ca Iustin Pop
5344 538475ca Iustin Pop
5345 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocation mode: the full set of new-instance parameters must
      # be present on the opcode
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      # the instance must NOT already exist
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        # each NIC must be a dict carrying at least mac/ip/bridge
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for row in self.op.disks:
        # each disk must be a dict with an integer size and a 'r'/'w' mode
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      # relocation mode: only the name of an existing instance is needed
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      # canonicalize the name and remember the current secondaries for
      # the allocator's "relocate_from" field
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      # the 'out' direction actually runs an allocator script, so its
      # name must have been provided
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    Returns either the generated allocator input text (direction 'in')
    or the raw, unvalidated output of the allocator script (direction
    'out').

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      # just return the input that would be fed to the allocator
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result