Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 5bf7b5cf

History | View | Annotate | Download (191.5 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 ffa1c0dc Iustin Pop
import logging
34 a8083063 Iustin Pop
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 6048c986 Guido Trotter
from ganeti import locking
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 8d14b30d Iustin Pop
from ganeti import serializer
45 d61df03e Iustin Pop
46 d61df03e Iustin Pop
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  # Hooks path and type; None means no hooks are run for this LU
  HPATH = None
  HTYPE = None
  # Opcode attributes that must be present (non-None) on self.op
  _OP_REQP = []
  REQ_MASTER = True
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    Raises errors.OpPrereqError if a required opcode parameter is missing,
    if the cluster is not initialized, or (when REQ_MASTER is set) if we
    are not running on the master node.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # 0 = exclusive, 1 = shared, per locking level
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    # (see _LockInstancesNodes)
    self.recalculate_locks = {}
    # Lazily-created SshRunner, exposed via the 'ssh' property below
    self.__ssh = None

    # Validate that all opcode parameters listed in _OP_REQP were supplied
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = self.cfg.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object

    The runner is created on first access and cached for subsequent calls.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    # NOTE(review): a single name (string), not a list, is stored here —
    # presumably the locking layer accepts both forms; confirm against mcpu
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    # LOCKS_REPLACE overwrites the declared node locks; LOCKS_APPEND adds to
    # whatever was already declared for the node level
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # one-shot: the recalculation request is consumed here
    del self.recalculate_locks[locking.LEVEL_NODE]
309 c4a2fee1 Guido Trotter
310 a8083063 Iustin Pop
311 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Base class for Logical Units that do not run any hooks.

  Leaving both HPATH and HTYPE unset tells the hooks machinery to skip
  hook execution entirely; deriving from this class avoids repeating
  these two assignments in every hook-less LU.

  """
  HPATH = None
  HTYPE = None
320 a8083063 Iustin Pop
321 a8083063 Iustin Pop
322 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Expand a list of node names and return them nicely sorted.

  Args:
    nodes: non-empty list of node names (strings) to expand

  Raises errors.OpPrereqError if the argument is not a list or a name
  cannot be expanded, errors.ProgrammerError if the list is empty.

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  expanded = []
  for node_name in nodes:
    full_name = lu.cfg.ExpandNodeName(node_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % node_name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
344 3312b702 Iustin Pop
345 3312b702 Iustin Pop
346 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Expand a list of instance names and return them nicely sorted.

  Args:
    instances: list of instance names (strings), or a false value
      (None/empty list) to mean all instances in the cluster

  Raises errors.OpPrereqError if the argument is not a list or a name
  cannot be expanded.

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  # an empty/None selection means "all instances"
  if not instances:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  expanded = []
  for inst_name in instances:
    full_name = lu.cfg.ExpandInstanceName(inst_name)
    if full_name is None:
      raise errors.OpPrereqError("No such instance name '%s'" % inst_name)
    expanded.append(full_name)
  return utils.NiceSort(expanded)
368 dcb93971 Michael Hanselmann
369 dcb93971 Michael Hanselmann
370 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
371 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
372 83120a01 Michael Hanselmann

373 83120a01 Michael Hanselmann
  Args:
374 83120a01 Michael Hanselmann
    static: Static fields
375 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
376 83120a01 Michael Hanselmann

377 83120a01 Michael Hanselmann
  """
378 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
379 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
380 dcb93971 Michael Hanselmann
381 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
382 dcb93971 Michael Hanselmann
383 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
384 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
385 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
386 3ecf6786 Iustin Pop
                                          difference(all_fields)))
387 dcb93971 Michael Hanselmann
388 dcb93971 Michael Hanselmann
389 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
390 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
391 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
392 ecb215b5 Michael Hanselmann

393 ecb215b5 Michael Hanselmann
  Args:
394 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
395 396e1b78 Michael Hanselmann
  """
396 396e1b78 Michael Hanselmann
  env = {
397 0e137c28 Iustin Pop
    "OP_TARGET": name,
398 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
399 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
400 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
401 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
402 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
403 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
404 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
405 396e1b78 Michael Hanselmann
  }
406 396e1b78 Michael Hanselmann
407 396e1b78 Michael Hanselmann
  if nics:
408 396e1b78 Michael Hanselmann
    nic_count = len(nics)
409 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
410 396e1b78 Michael Hanselmann
      if ip is None:
411 396e1b78 Michael Hanselmann
        ip = ""
412 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
413 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
414 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
415 396e1b78 Michael Hanselmann
  else:
416 396e1b78 Michael Hanselmann
    nic_count = 0
417 396e1b78 Michael Hanselmann
418 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
419 396e1b78 Michael Hanselmann
420 396e1b78 Michael Hanselmann
  return env
421 396e1b78 Michael Hanselmann
422 396e1b78 Michael Hanselmann
423 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns the dict produced by _BuildInstanceHookEnv, with any entries
  from 'override' taking precedence.

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # Bug fix: this previously passed instance.os here too, so the
    # INSTANCE_STATUS hook variable duplicated the OS name instead of
    # reflecting the instance's actual run status.
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
443 396e1b78 Michael Hanselmann
444 396e1b78 Michael Hanselmann
445 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
446 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
447 bf6929a2 Alexander Schreiber

448 bf6929a2 Alexander Schreiber
  """
449 bf6929a2 Alexander Schreiber
  # check bridges existance
450 bf6929a2 Alexander Schreiber
  brlist = [nic.bridge for nic in instance.nics]
451 72737a7f Iustin Pop
  if not lu.rpc.call_bridges_exist(instance.primary_node, brlist):
452 bf6929a2 Alexander Schreiber
    raise errors.OpPrereqError("one or more target bridges %s does not"
453 bf6929a2 Alexander Schreiber
                               " exist on destination node '%s'" %
454 bf6929a2 Alexander Schreiber
                               (brlist, instance.primary_node))
455 bf6929a2 Alexander Schreiber
456 bf6929a2 Alexander Schreiber
457 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty: only the master node may
    remain, and no instances may be defined.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    node_list = self.cfg.GetNodeList()
    if len(node_list) != 1 or node_list[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(node_list) - 1))

    instance_list = self.cfg.GetInstanceList()
    if instance_list:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instance_list))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Stops the master role on the master node, backs up the ganeti ssh
    keys, and returns the master's name to the caller.

    """
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master
493 a8083063 Iustin Pop
494 a8083063 Iustin Pop
495 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"          # hooks name for this LU
  HTYPE = constants.HTYPE_CLUSTER   # hooks target type: the whole cluster
  # required opcode parameters: the list of optional checks to skip
  _OP_REQP = ["skip_checks"]
  # presumably opts out of the big ganeti lock; per-level shared locks
  # are declared in ExpandNames below -- confirm against LogicalUnit
  REQ_BGL = False
504 d4b9d97f Guido Trotter
  def ExpandNames(self):
    """Declare the locks needed by cluster verification.

    All nodes and all instances are acquired, in shared mode.

    """
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # every locking level is taken in shared (non-exclusive) mode
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
510 a8083063 Iustin Pop
511 a8083063 Iustin Pop
  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data as returned by the node
      node_result: the result of the node verify rpc call
      remote_version: the protocol version reported by the node
      feedback_fn: function used to report problems

    Returns True if any problem was found, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    if not node_result:
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # use a dedicated loop variable; the previous code reused 'node',
        # clobbering the function parameter for the rest of the function
        for a_node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (a_node, node_result['nodelist'][a_node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        # again, do not shadow the 'node' parameter
        for a_node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (a_node, node_result['node-net-test'][a_node]))

    hyp_result = node_result.get('hypervisor', None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        # a non-None entry is the error message of a failed hv verify
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))
    return bad
599 a8083063 Iustin Pop
600 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
601 c5705f58 Guido Trotter
                      node_instance, feedback_fn):
602 a8083063 Iustin Pop
    """Verify an instance.
603 a8083063 Iustin Pop

604 a8083063 Iustin Pop
    This function checks to see if the required block devices are
605 a8083063 Iustin Pop
    available on the instance's node.
606 a8083063 Iustin Pop

607 a8083063 Iustin Pop
    """
608 a8083063 Iustin Pop
    bad = False
609 a8083063 Iustin Pop
610 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
611 a8083063 Iustin Pop
612 a8083063 Iustin Pop
    node_vol_should = {}
613 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
614 a8083063 Iustin Pop
615 a8083063 Iustin Pop
    for node in node_vol_should:
616 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
617 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
618 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
619 a8083063 Iustin Pop
                          (volume, node))
620 a8083063 Iustin Pop
          bad = True
621 a8083063 Iustin Pop
622 a8083063 Iustin Pop
    if not instanceconfig.status == 'down':
623 a872dae6 Guido Trotter
      if (node_current not in node_instance or
624 a872dae6 Guido Trotter
          not instance in node_instance[node_current]):
625 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
626 a8083063 Iustin Pop
                        (instance, node_current))
627 a8083063 Iustin Pop
        bad = True
628 a8083063 Iustin Pop
629 a8083063 Iustin Pop
    for node in node_instance:
630 a8083063 Iustin Pop
      if (not node == node_current):
631 a8083063 Iustin Pop
        if instance in node_instance[node]:
632 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
633 a8083063 Iustin Pop
                          (instance, node))
634 a8083063 Iustin Pop
          bad = True
635 a8083063 Iustin Pop
636 6a438c98 Michael Hanselmann
    return bad
637 a8083063 Iustin Pop
638 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
639 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
640 a8083063 Iustin Pop

641 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
642 a8083063 Iustin Pop
    reported as unknown.
643 a8083063 Iustin Pop

644 a8083063 Iustin Pop
    """
645 a8083063 Iustin Pop
    bad = False
646 a8083063 Iustin Pop
647 a8083063 Iustin Pop
    for node in node_vol_is:
648 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
649 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
650 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
651 a8083063 Iustin Pop
                      (volume, node))
652 a8083063 Iustin Pop
          bad = True
653 a8083063 Iustin Pop
    return bad
654 a8083063 Iustin Pop
655 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
656 a8083063 Iustin Pop
    """Verify the list of running instances.
657 a8083063 Iustin Pop

658 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
659 a8083063 Iustin Pop

660 a8083063 Iustin Pop
    """
661 a8083063 Iustin Pop
    bad = False
662 a8083063 Iustin Pop
    for node in node_instance:
663 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
664 a8083063 Iustin Pop
        if runninginstance not in instancelist:
665 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
666 a8083063 Iustin Pop
                          (runninginstance, node))
667 a8083063 Iustin Pop
          bad = True
668 a8083063 Iustin Pop
    return bad
669 a8083063 Iustin Pop
670 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
671 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
672 2b3b6ddd Guido Trotter

673 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
674 2b3b6ddd Guido Trotter
    was primary for.
675 2b3b6ddd Guido Trotter

676 2b3b6ddd Guido Trotter
    """
677 2b3b6ddd Guido Trotter
    bad = False
678 2b3b6ddd Guido Trotter
679 2b3b6ddd Guido Trotter
    for node, nodeinfo in node_info.iteritems():
680 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
681 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to should a single
682 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
683 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
684 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
685 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
686 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
687 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
688 2b3b6ddd Guido Trotter
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
689 2b3b6ddd Guido Trotter
        needed_mem = 0
690 2b3b6ddd Guido Trotter
        for instance in instances:
691 2b3b6ddd Guido Trotter
          needed_mem += instance_cfg[instance].memory
692 2b3b6ddd Guido Trotter
        if nodeinfo['mfree'] < needed_mem:
693 2b3b6ddd Guido Trotter
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
694 2b3b6ddd Guido Trotter
                      " failovers should node %s fail" % (node, prinode))
695 2b3b6ddd Guido Trotter
          bad = True
696 2b3b6ddd Guido Trotter
    return bad
697 2b3b6ddd Guido Trotter
698 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Turn the requested list of checks to skip into a set and make sure
    every entry is one of the optional verification checks.

    """
    skip_set = frozenset(self.op.skip_checks)
    self.skip_set = skip_set
    # any name outside the optional checks is a user error
    invalid = skip_set - constants.VERIFY_OPTIONAL_CHECKS
    if invalid:
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
708 a8083063 Iustin Pop
709 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure
    makes the output be logged in the verify output and the
    verification to fail.

    """
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    run_nodes = self.cfg.GetNodeList()
    return env, [], run_nodes
720 d8fff41c Guido Trotter
721 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Gathers volume, instance, version and node information from all
    nodes via RPC, then cross-checks the results against the cluster
    configuration.  All problems are reported through feedback_fn;
    returns True if the cluster verified cleanly, False otherwise.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    node_volume = {}     # node name -> volume data ({} on LVM error)
    node_instance = {}   # node name -> list of running instances
    node_info = {}       # node name -> memory/disk/instance accounting
    instance_cfg = {}    # instance name -> instance config object

    # FIXME: verify OS list
    # do local checksums
    file_names = []
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # fan out all the RPC queries before processing any result
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors)
    all_vglist = self.rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': hypervisors,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    all_rversion = self.rpc.call_version(nodelist)
    all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
                                        self.cfg.GetHypervisorType())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a string result is an LVM error message from the node; keep
        # only its tail to avoid flooding the output
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      # NOTE(review): this rebinds 'nodeinfo', shadowing the list of node
      # objects built above; safe here because that list is no longer
      # used, but confusing -- consider renaming on a future cleanup
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          # NOTE(review): unlike the primary-node branch above, this does
          # not set 'bad' -- confirm whether that is intentional
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return not bad
885 a8083063 Iustin Pop
886 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result and report it to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # Only the POST phase hooks matter for verification; for any other
    # phase nothing is returned (implicitly None), as before.
    if phase == constants.HOOKS_PHASE_POST:
      # used to re-indent the hooks' output
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name, res in hooks_results.items():
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          node_header_shown = False
          for script, hkr, output in res:
            if hkr != constants.HKR_FAIL:
              continue
            if not node_header_shown:
              # show the node header only once, and only if there are
              # failing hooks on that node
              feedback_fn("  Node %s:" % node_name)
              node_header_shown = True
            feedback_fn("    ERROR: Script %s failed, output:" % script)
            feedback_fn("%s" % indent_re.sub('      ', output))
            lu_result = 1

      return lu_result
927 d8fff41c Guido Trotter
928 a8083063 Iustin Pop
929 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  # no required opcode parameters
  _OP_REQP = []
  # presumably opts out of the big ganeti lock; per-level shared locks
  # are declared in ExpandNames below -- confirm against NoHooksLU
  REQ_BGL = False
935 d4b9d97f Guido Trotter
936 d4b9d97f Guido Trotter
  def ExpandNames(self):
    """Declare the locks needed to verify the cluster disks.

    All nodes and all instances are acquired, in shared mode.

    """
    locks = {}
    locks[locking.LEVEL_NODE] = locking.ALL_SET
    locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
    self.needed_locks = locks
    # every locking level is taken in shared (non-exclusive) mode
    self.share_locks = dict([(level, 1) for level in locking.LEVELS])
942 2c95a8d4 Iustin Pop
943 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Verifying the cluster disks has no prerequisites, so this is a
    no-op.

    """
950 2c95a8d4 Iustin Pop
951 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns a 4-element tuple (also bound to 'result'):
      - res_nodes: list of nodes that could not be contacted
      - res_nlvm: dict of node name -> LVM error message
      - res_instances: list of instance names with offline logical
        volumes
      - res_missing: dict of instance name -> list of missing
        (node, volume) pairs

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # map of (node, volume) -> owning instance, for all running
    # net-mirrored instances
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      # nothing relevant to check
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        # BUGFIX: without this 'continue' the error string itself was
        # iterated below, crashing on lvs.iteritems()
        continue
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        # volumes found on the node are removed from nv_dict; whatever
        # is left at the end is missing from its node
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1007 2c95a8d4 Iustin Pop
1008 2c95a8d4 Iustin Pop
1009 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  Changes the cluster name and, if needed, the master IP address, then
  redistributes the updated configuration to all nodes.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]  # the new cluster name is the only required parameter

  def BuildHooksEnv(self):
    """Build hooks env.

    Hooks run only on the master node; the old cluster name is the
    target and the new name is exported.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves the new name, requires that either the name or the IP
    actually changes, and that a changed IP is not already in use.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # refuse to take over an IP that already answers on the network
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      # TODO: sstore
      # NOTE(review): 'ss' is not defined in this module at this revision
      # (see the TODO above) -- these calls presumably need porting to the
      # new ssconf interface; confirm before relying on cluster rename.
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = self.rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            # best-effort distribution: log and continue with other nodes
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to restore the master role, even if the rename failed
      if not self.rpc.call_node_start_master(master, False):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1085 07bd8a51 Iustin Pop
1086 07bd8a51 Iustin Pop
1087 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Recursively check whether a disk or any of its children is LVM-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  # any LVM-based descendant makes the whole tree count as LVM-based
  for child in (disk.children or []):
    if _RecursiveCheckIfLVMBased(child):
      return True
  return disk.dev_type == constants.LD_LV
1102 8084f9f6 Manuel Franceschini
1103 8084f9f6 Manuel Franceschini
1104 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  Currently only the volume group name can be changed (or unset, which
  disables LVM-based storage cluster-wide).

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    Hooks run only on the master node.

    """
    mn = self.cfg.GetMasterNode()
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if not self.op.vg_name:
      # disabling LVM: refuse if any instance still uses an LVM-backed disk
      for inst in self.cfg.GetAllInstancesInfo().values():
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")
    else:
      # a volume group was given: it must exist and be large enough on
      # every locked node
      node_list = self.acquired_locks[locking.LEVEL_NODE]
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    new_vg = self.op.vg_name
    if new_vg == self.cfg.GetVGName():
      # no actual change requested
      feedback_fn("Cluster LVM configuration already in desired"
                  " state, not changing")
    else:
      self.cfg.SetVGName(new_vg)
1169 8084f9f6 Manuel Franceschini
1170 8084f9f6 Manuel Franceschini
1171 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1172 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1173 a8083063 Iustin Pop

1174 a8083063 Iustin Pop
  """
1175 a8083063 Iustin Pop
  if not instance.disks:
1176 a8083063 Iustin Pop
    return True
1177 a8083063 Iustin Pop
1178 a8083063 Iustin Pop
  if not oneshot:
1179 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1180 a8083063 Iustin Pop
1181 a8083063 Iustin Pop
  node = instance.primary_node
1182 a8083063 Iustin Pop
1183 a8083063 Iustin Pop
  for dev in instance.disks:
1184 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
1185 a8083063 Iustin Pop
1186 a8083063 Iustin Pop
  retries = 0
1187 a8083063 Iustin Pop
  while True:
1188 a8083063 Iustin Pop
    max_time = 0
1189 a8083063 Iustin Pop
    done = True
1190 a8083063 Iustin Pop
    cumul_degraded = False
1191 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1192 a8083063 Iustin Pop
    if not rstats:
1193 b9bddb6b Iustin Pop
      lu.proc.LogWarning("Can't get any data from node %s" % node)
1194 a8083063 Iustin Pop
      retries += 1
1195 a8083063 Iustin Pop
      if retries >= 10:
1196 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1197 3ecf6786 Iustin Pop
                                 " aborting." % node)
1198 a8083063 Iustin Pop
      time.sleep(6)
1199 a8083063 Iustin Pop
      continue
1200 a8083063 Iustin Pop
    retries = 0
1201 a8083063 Iustin Pop
    for i in range(len(rstats)):
1202 a8083063 Iustin Pop
      mstat = rstats[i]
1203 a8083063 Iustin Pop
      if mstat is None:
1204 b9bddb6b Iustin Pop
        lu.proc.LogWarning("Can't compute data for node %s/%s" %
1205 b9bddb6b Iustin Pop
                           (node, instance.disks[i].iv_name))
1206 a8083063 Iustin Pop
        continue
1207 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1208 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1209 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1210 a8083063 Iustin Pop
      if perc_done is not None:
1211 a8083063 Iustin Pop
        done = False
1212 a8083063 Iustin Pop
        if est_time is not None:
1213 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1214 a8083063 Iustin Pop
          max_time = est_time
1215 a8083063 Iustin Pop
        else:
1216 a8083063 Iustin Pop
          rem_time = "no time estimate"
1217 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1218 b9bddb6b Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
1219 a8083063 Iustin Pop
    if done or oneshot:
1220 a8083063 Iustin Pop
      break
1221 a8083063 Iustin Pop
1222 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1223 a8083063 Iustin Pop
1224 a8083063 Iustin Pop
  if done:
1225 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1226 a8083063 Iustin Pop
  return not cumul_degraded
1227 a8083063 Iustin Pop
1228 a8083063 Iustin Pop
1229 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1230 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1231 a8083063 Iustin Pop

1232 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1233 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1234 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1235 0834c866 Iustin Pop

1236 a8083063 Iustin Pop
  """
1237 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1238 0834c866 Iustin Pop
  if ldisk:
1239 0834c866 Iustin Pop
    idx = 6
1240 0834c866 Iustin Pop
  else:
1241 0834c866 Iustin Pop
    idx = 5
1242 a8083063 Iustin Pop
1243 a8083063 Iustin Pop
  result = True
1244 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1245 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1246 a8083063 Iustin Pop
    if not rstats:
1247 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1248 a8083063 Iustin Pop
      result = False
1249 a8083063 Iustin Pop
    else:
1250 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1251 a8083063 Iustin Pop
  if dev.children:
1252 a8083063 Iustin Pop
    for child in dev.children:
1253 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1254 a8083063 Iustin Pop
1255 a8083063 Iustin Pop
  return result
1256 a8083063 Iustin Pop
1257 a8083063 Iustin Pop
1258 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
    _CheckOutputFields(static=[],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary.

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        map: a map with osnames as keys and as value another map, with
             nodes as keys and list of OS objects as values
             e.g. {"debian-etch": {"node1": [<object>,...],
                                   "node2": [<object>,]}
                  }

    """
    by_os = {}
    for node_name, nr in rlist.iteritems():
      if not nr:
        # skip nodes that returned no OS data
        continue
      for os_obj in nr:
        if os_obj.name not in by_os:
          # first sighting of this OS: pre-create an empty entry for every
          # known node, so the result always covers the full node list
          by_os[os_obj.name] = dict([(nname, []) for nname in node_list])
        by_os[os_obj.name][node_name].append(os_obj)
    return by_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = self.rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    per_os = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in per_os.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          value = os_name
        elif field == "valid":
          # valid iff every node returned a non-empty list whose first
          # entry is truthy
          value = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          value = {}
          for node_name, nos_list in os_data.iteritems():
            value[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(value)
      output.append(row)

    return output
1342 a8083063 Iustin Pop
1343 a8083063 Iustin Pop
1344 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # use call syntax for the raise, consistent with the rest of the module
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    # drop the node from the cluster context first, then tell it to leave
    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)
1411 c8a0948f Michael Hanselmann
1412 a8083063 Iustin Pop
1413 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  Returns, for each selected node, the requested output fields; static
  fields come from the configuration, dynamic fields from a live RPC
  query of the nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    # fields whose values must be queried live from the nodes via RPC
    self.dynamic_fields = frozenset([
      "dtotal", "dfree",
      "mtotal", "mnode", "mfree",
      "bootid",
      "ctotal",
      ])

    # fields answerable from the configuration alone
    self.static_fields = frozenset([
      "name", "pinst_cnt", "sinst_cnt",
      "pinst_list", "sinst_list",
      "pip", "sip", "tags",
      "serial_no",
      ])

    _CheckOutputFields(static=self.static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      # locked query: answer exactly for the locked set of nodes
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      # unlocked query of specific nodes: they may have vanished meanwhile
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.dynamic_fields.intersection(self.op.output_fields):
      # at least one live field was requested: query the nodes via RPC
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          # node did not answer: its dynamic fields will read as None below
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    # reverse maps: node name -> set of instance names
    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      # instance-related fields requested: build the reverse mappings
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field in self.dynamic_fields:
          # missing live data yields None for the requested field
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1549 a8083063 Iustin Pop
1550 a8083063 Iustin Pop
1551 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the output fields and set up the (shared) node locks."""
    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    The node list is taken from the locks acquired in ExpandNames.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of volumes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]
    lv_by_node = dict((inst, inst.MapLVsByNode()) for inst in instances)

    output = []
    for node in nodenames:
      node_vols = volumes.get(node)
      if not node_vols:
        # node unreachable, or it reported no volumes
        continue

      node_vols = list(node_vols)
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this LV, if any
            val = '-'
            for inst in instances:
              node_lvs = lv_by_node[inst]
              if node in node_lvs and vol['name'] in node_lvs[node]:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
1628 dcb93971 Michael Hanselmann
1629 dcb93971 Michael Hanselmann
1630 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  Resolves and validates the new node's addresses, pushes the ssh host
  and cluster keys to it, verifies it over RPC and finally registers it
  in the configuration (or re-registers it, for a readd).

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current members only; post-hooks also on the
    # node being added
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; raises if the host is not resolvable
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # single-homed node: secondary address defaults to the primary one
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    # NOTE(review): self.op.readd is read below but is not in _OP_REQP;
    # presumably it is defaulted by the opcode definition — verify.
    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    # check for IP clashes with the existing nodes; for a readd, the
    # node itself must keep its previous addresses
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # the host keys plus the cluster user's key pair, in the exact
    # order expected by the call_node_add RPC below
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    # for dual-homed nodes, verify the node really owns the secondary
    # address we were given
    if new_node.secondary_ip != new_node.primary_ip:
      if not self.rpc.call_node_has_ip_address(new_node.name,
                                               new_node.secondary_ip):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # have the master verify ssh/hostname connectivity to the new node
    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if not result[verifier]:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier]['nodelist']:
        for failed in result[verifier]['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier]['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    # on a readd the node is already part of the configured node list
    if not self.op.readd:
      dist_nodes.append(node)
    # no need to copy files to ourselves
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # best-effort: failed copies are only logged, not fatal
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    # files needed only by specific hypervisors go to the new node alone
    to_copy = []
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if not result[node]:
        logger.Error("could not copy file %s to node %s" % (fname, node))

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
1830 a8083063 Iustin Pop
1831 a8083063 Iustin Pop
1832 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False
  REQ_BGL = False

  def ExpandNames(self):
    """This LU needs no locks at all."""
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return the cluster configuration as a dict.

    """
    return {
      "name": self.cfg.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": self.cfg.GetMasterNode(),
      "architecture": (platform.architecture()[0], platform.machine()),
      "hypervisor_type": self.cfg.GetHypervisorType(),
      "enabled_hypervisors": self.cfg.GetClusterInfo().enabled_hypervisors,
      }
1867 a8083063 Iustin Pop
1868 a8083063 Iustin Pop
1869 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the requested fields; no locks are needed."""
    self.needed_locks = {}

    _CheckOutputFields(static=["cluster_name", "master_node"],
                       dynamic=[],
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Collect and return the requested configuration values.

    """
    # map each known field name to the config getter producing it
    getters = {
      "cluster_name": self.cfg.GetClusterName,
      "master_node": self.cfg.GetMasterNode,
      }
    values = []
    for field in self.op.output_fields:
      try:
        getter = getters[field]
      except KeyError:
        raise errors.ParameterError(field)
      values.append(getter())
    return values
1903 a8083063 Iustin Pop
1904 a8083063 Iustin Pop
1905 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Lock the instance; its node locks are computed in DeclareLocks."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Replace the empty node lock list with the instance's nodes."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Activate the disks and return their node/device mapping.

    """
    assembled, disk_mapping = _AssembleInstanceDisks(self, self.instance)
    if assembled:
      return disk_mapping
    raise errors.OpExecError("Cannot activate block devices")
1940 a8083063 Iustin Pop
1941 a8083063 Iustin Pop
1942 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
1943 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
1944 a8083063 Iustin Pop

1945 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
1946 a8083063 Iustin Pop

1947 a8083063 Iustin Pop
  Args:
1948 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
1949 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
1950 a8083063 Iustin Pop
                        in an error return from the function
1951 a8083063 Iustin Pop

1952 a8083063 Iustin Pop
  Returns:
1953 a8083063 Iustin Pop
    false if the operation failed
1954 a8083063 Iustin Pop
    list of (host, instance_visible_name, node_visible_name) if the operation
1955 a8083063 Iustin Pop
         suceeded with the mapping from node devices to instance devices
1956 a8083063 Iustin Pop
  """
1957 a8083063 Iustin Pop
  device_info = []
1958 a8083063 Iustin Pop
  disks_ok = True
1959 fdbd668d Iustin Pop
  iname = instance.name
1960 fdbd668d Iustin Pop
  # With the two passes mechanism we try to reduce the window of
1961 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
1962 fdbd668d Iustin Pop
  # before handshaking occured, but we do not eliminate it
1963 fdbd668d Iustin Pop
1964 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
1965 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
1966 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
1967 fdbd668d Iustin Pop
  # SyncSource, etc.)
1968 fdbd668d Iustin Pop
1969 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
1970 a8083063 Iustin Pop
  for inst_disk in instance.disks:
1971 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1972 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
1973 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
1974 a8083063 Iustin Pop
      if not result:
1975 f4bc1f2c Michael Hanselmann
        logger.Error("could not prepare block device %s on node %s"
1976 fdbd668d Iustin Pop
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
1977 fdbd668d Iustin Pop
        if not ignore_secondaries:
1978 a8083063 Iustin Pop
          disks_ok = False
1979 fdbd668d Iustin Pop
1980 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
1981 fdbd668d Iustin Pop
1982 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
1983 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
1984 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1985 fdbd668d Iustin Pop
      if node != instance.primary_node:
1986 fdbd668d Iustin Pop
        continue
1987 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
1988 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
1989 fdbd668d Iustin Pop
      if not result:
1990 fdbd668d Iustin Pop
        logger.Error("could not prepare block device %s on node %s"
1991 fdbd668d Iustin Pop
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
1992 fdbd668d Iustin Pop
        disks_ok = False
1993 fdbd668d Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
1994 a8083063 Iustin Pop
1995 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
1996 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
1997 b352ab5b Iustin Pop
  # improving the logical/physical id handling
1998 b352ab5b Iustin Pop
  for disk in instance.disks:
1999 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
2000 b352ab5b Iustin Pop
2001 a8083063 Iustin Pop
  return disks_ok, device_info
2002 a8083063 Iustin Pop
2003 a8083063 Iustin Pop
2004 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance, aborting on consistency errors.

  All the instance's disks are assembled (errors on secondary nodes are
  ignored when force is true).  On failure the already-assembled
  devices are shut down again and OpExecError is raised.

  """
  assembled, _ = _AssembleInstanceDisks(lu, instance,
                                        ignore_secondaries=force)
  if assembled:
    return
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2016 fe7b0351 Michael Hanselmann
2017 fe7b0351 Michael Hanselmann
2018 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Lock the instance; its node locks are computed in DeclareLocks."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Replace the empty node lock list with the instance's nodes."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks, refusing if the instance is running.

    """
    _SafeShutdownInstanceDisks(self, self.instance)
2050 a8083063 Iustin Pop
2051 a8083063 Iustin Pop
2052 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  @param lu: a logical unit from which we get configuration data
  @param instance: the instance whose disks we shut down
  @raise errors.OpExecError: if the primary node cannot be contacted
      or the instance is still running on it

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                    [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  # a non-list answer means the RPC to the primary node failed; use
  # isinstance instead of an exact type check so list subclasses from
  # the RPC layer are accepted too
  if not isinstance(ins_l, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2071 a8083063 Iustin Pop
2072 a8083063 Iustin Pop
2073 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2074 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2075 a8083063 Iustin Pop

2076 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2077 a8083063 Iustin Pop

2078 a8083063 Iustin Pop
  If the ignore_primary is false, errors on the primary node are
2079 a8083063 Iustin Pop
  ignored.
2080 a8083063 Iustin Pop

2081 a8083063 Iustin Pop
  """
2082 a8083063 Iustin Pop
  result = True
2083 a8083063 Iustin Pop
  for disk in instance.disks:
2084 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2085 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
2086 72737a7f Iustin Pop
      if not lu.rpc.call_blockdev_shutdown(node, top_disk):
2087 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
2088 a8083063 Iustin Pop
                     (disk.iv_name, node))
2089 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2090 a8083063 Iustin Pop
          result = False
2091 a8083063 Iustin Pop
  return result
2092 a8083063 Iustin Pop
2093 a8083063 Iustin Pop
2094 b9bddb6b Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
2095 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2096 d4f16fd9 Iustin Pop

2097 d4f16fd9 Iustin Pop
  This function check if a given node has the needed amount of free
2098 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2099 d4f16fd9 Iustin Pop
  information from the node, this function raise an OpPrereqError
2100 d4f16fd9 Iustin Pop
  exception.
2101 d4f16fd9 Iustin Pop

2102 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
2103 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2104 e69d05fd Iustin Pop
  @type node: C{str}
2105 e69d05fd Iustin Pop
  @param node: the node to check
2106 e69d05fd Iustin Pop
  @type reason: C{str}
2107 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2108 e69d05fd Iustin Pop
  @type requested: C{int}
2109 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2110 e69d05fd Iustin Pop
  @type hypervisor: C{str}
2111 e69d05fd Iustin Pop
  @param hypervisor: the hypervisor to ask for memory stats
2112 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2113 e69d05fd Iustin Pop
      we cannot check the node
2114 d4f16fd9 Iustin Pop

2115 d4f16fd9 Iustin Pop
  """
2116 72737a7f Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
2117 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2118 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2119 d4f16fd9 Iustin Pop
                             " information" % (node,))
2120 d4f16fd9 Iustin Pop
2121 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2122 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2123 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2124 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2125 d4f16fd9 Iustin Pop
  if requested > free_mem:
2126 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2127 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2128 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2129 d4f16fd9 Iustin Pop
2130 d4f16fd9 Iustin Pop
2131 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Logical unit that brings a stopped instance back up.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"FORCE": self.op.force}
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    node_list = [self.cfg.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.instance = instance
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # the instance's bridges must exist before we can start it
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         instance.memory, instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    inst = self.instance
    force_start = self.op.force
    start_args = getattr(self.op, "extra_args", "")

    # record the new state in the configuration before acting
    self.cfg.MarkInstanceUp(inst.name)

    target_node = inst.primary_node

    _StartInstanceDisks(self, inst, force_start)

    if not self.rpc.call_instance_start(target_node, inst, start_args):
      # roll back the disk activation before reporting the failure
      _ShutdownInstanceDisks(self, inst)
      raise errors.OpExecError("Could not start instance")
2197 a8083063 Iustin Pop
2198 a8083063 Iustin Pop
2199 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    # validate the reboot type before taking any locks
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # Only a full reboot deactivates/reactivates the disks (see Exec),
      # so only then do we need the secondary nodes locked as well.
      # Previously this read "not constants.INSTANCE_REBOOT_FULL", a
      # constant expression that is always False, so all nodes were
      # always locked regardless of the reboot type.
      primary_only = self.op.reboot_type != constants.INSTANCE_REBOOT_FULL
      self._LockInstancesNodes(primary_only=primary_only)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # the instance's bridges must exist for it to come back up
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboots are handled entirely by the hypervisor on the
      # primary node
      if not self.rpc.call_instance_reboot(node_current, instance,
                                           reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: shut the instance down, cycle its disks, start it
      if not self.rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      if not self.rpc.call_instance_start(node_current, instance, extra_args):
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2279 bf6929a2 Alexander Schreiber
2280 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Logical unit that stops a running instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are filled in later, from the instance's node list
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    node_list = [self.cfg.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    inst = self.instance
    pnode = inst.primary_node
    # record the new state in the configuration before acting on it
    self.cfg.MarkInstanceDown(inst.name)
    if not self.rpc.call_instance_shutdown(pnode, inst):
      logger.Error("could not shutdown instance")

    _ShutdownInstanceDisks(self, inst)
2330 a8083063 Iustin Pop
2331 a8083063 Iustin Pop
2332 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # the reinstall opcode has no "pnode" field, so the instance's
        # primary node must be used in the message (the old code
        # referenced self.op.pnode, which raised AttributeError here)
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      # update the OS in the configuration before re-running the
      # create scripts, so they use the new OS
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not self.rpc.call_instance_os_add(inst.primary_node, inst,
                                           "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks, even if the OS install failed
      _ShutdownInstanceDisks(self, inst)
2421 fe7b0351 Michael Hanselmann
2422 fe7b0351 Michael Hanselmann
2423 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  This renames the instance in the cluster configuration and, for
  file-based instances, moves its storage directory on the primary
  node; it also runs the OS rename script on the instance's disks.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]
  # NOTE(review): unlike the surrounding LUs this one defines no
  # ExpandNames/REQ_BGL, so it appears to use the old-style (big-lock)
  # execution path -- confirm before relying on fine-grained locking here

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and is not already taken (by another
    instance name or, unless ignore_ip is set, by a live IP).

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node: the config may say "down" while the
    # hypervisor still reports the instance as running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    # canonicalize the new name via the resolver before using it
    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    # remember the old storage directory before the config rename
    # changes the disks' logical ids
    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      # move the on-disk storage directory to match the new name; the
      # RPC returns (success, payload), checked in two steps below
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)

      if not result:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      # a failing rename script is only logged, not fatal: the config
      # rename has already happened and is not rolled back
      if not self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                               old_name,
                                               "sda", "sdb"):
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
2529 decd5f45 Iustin Pop
2530 decd5f45 Iustin Pop
2531 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later from the instance's node list
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks for instance removal run only on the master node.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    node_list = [self.cfg.GetMasterNode()]
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    inst = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (inst.name, inst.primary_node))

    shutdown_ok = self.rpc.call_instance_shutdown(inst.primary_node, inst)
    if not shutdown_ok:
      # with ignore_failures a failed shutdown is only a warning
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (inst.name, inst.primary_node))
      feedback_fn("Warning: can't shutdown instance")

    logger.Info("removing block devices for instance %s" % inst.name)

    if not _RemoveDisks(self, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % inst.name)

    self.cfg.RemoveInstance(inst.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = inst.name
2596 a8083063 Iustin Pop
2597 a8083063 Iustin Pop
2598 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  Serves static fields straight from the configuration; the dynamic
  fields (oper_state, oper_ram, status) additionally require a live
  RPC query of the instances' primary nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the wanted instances and the locks needed.

    Locking (shared, instances plus their nodes) is only set up when
    at least one requested field is not static, i.e. when live data
    must be gathered from the nodes.

    """
    # fields that need a live query on the instances' primary nodes
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    # fields answered directly from the cluster configuration
    self.static_fields = frozenset([
      "name", "os", "pnode", "snodes",
      "admin_state", "admin_ram",
      "disk_template", "ip", "mac", "bridge",
      "sda_size", "sdb_size", "vcpus", "tags",
      "network_port", "kernel_path", "initrd_path",
      "hvm_boot_order", "hvm_acpi", "hvm_pae",
      "hvm_cdrom_image_path", "hvm_nic_type",
      "hvm_disk_type", "vnc_bind_address",
      "serial_no", "hypervisor",
      ])
    _CheckOutputFields(static=self.static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # only lock if a dynamic (live) field was requested
    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      # node locks are computed later from the acquired instance locks
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # acquire the node locks of the locked instances, if locking at all
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list (one entry per instance) of lists (one entry per
    requested output field), in the order given by op.output_fields.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.do_locking:
      # the acquired locks define exactly which instances we report on
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
    elif self.wanted != locking.ALL_SET:
      instance_names = self.wanted
      # without locking an instance may disappear between name expansion
      # and this point; detect that instead of failing with KeyError
      missing = set(instance_names).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some instances were removed before retrieving their data: %s"
          % missing)
    else:
      instance_names = all_info.keys()
    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      # at least one live field requested: query all involved nodes
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # False explicitly marks an unreachable node...
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          # True when the instance is configured to be running
          val = (instance.status != "down")
        elif field == "oper_state":
          # actual run state; None when the primary node can't be reached
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin/operational state
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          # live memory usage; "-" when the instance isn't running
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # field name prefix ("sda"/"sdb") selects the disk
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "vcpus":
          val = instance.vcpus
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field in ("network_port", "kernel_path", "initrd_path",
                       "hvm_boot_order", "hvm_acpi", "hvm_pae",
                       "hvm_cdrom_image_path", "hvm_nic_type",
                       "hvm_disk_type", "vnc_bind_address"):
          # optional hypervisor attributes: missing values render as
          # "default" for the overridable ones, "-" otherwise
          val = getattr(instance, field, None)
          if val is not None:
            pass
          elif field in ("hvm_nic_type", "hvm_disk_type",
                         "kernel_path", "initrd_path"):
            val = "default"
          else:
            val = "-"
        elif field == "hypervisor":
          val = instance.hypervisor
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2769 a8083063 Iustin Pop
2770 a8083063 Iustin Pop
2771 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  Moves a network-mirrored (DRBD) instance to its secondary node by
  shutting it down on the primary and, if it was marked up, starting
  it on the secondary.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance; its node locks are computed in DeclareLocks
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a
    network-mirrored disk template, and that the target (secondary)
    node has enough memory and all required bridges.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, instance.memory,
                         instance.hypervisor)

    # check bridge existence on the target node
    brlist = [nic.bridge for nic in instance.nics]
    if not self.rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # a degraded target is only fatal for a running instance,
        # unless the user explicitly asked to ignore consistency
        if instance.status == "up" and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not self.rpc.call_instance_shutdown(source_node, instance):
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status == "up":
      feedback_fn("* activating the instance's disks on target node")
      logger.Info("Starting instance %s on node %s" %
                  (instance.name, target_node))

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        # roll the disks back down before aborting
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      if not self.rpc.call_instance_start(target_node, instance, None):
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s." %
                                 (instance.name, target_node))
2892 a8083063 Iustin Pop
2893 a8083063 Iustin Pop
2894 b9bddb6b Iustin Pop
def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
2895 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2896 a8083063 Iustin Pop

2897 a8083063 Iustin Pop
  This always creates all devices.
2898 a8083063 Iustin Pop

2899 a8083063 Iustin Pop
  """
2900 a8083063 Iustin Pop
  if device.children:
2901 a8083063 Iustin Pop
    for child in device.children:
2902 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
2903 a8083063 Iustin Pop
        return False
2904 a8083063 Iustin Pop
2905 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
2906 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
2907 72737a7f Iustin Pop
                                       instance.name, True, info)
2908 a8083063 Iustin Pop
  if not new_id:
2909 a8083063 Iustin Pop
    return False
2910 a8083063 Iustin Pop
  if device.physical_id is None:
2911 a8083063 Iustin Pop
    device.physical_id = new_id
2912 a8083063 Iustin Pop
  return True
2913 a8083063 Iustin Pop
2914 a8083063 Iustin Pop
2915 b9bddb6b Iustin Pop
def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
2916 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2917 a8083063 Iustin Pop

2918 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2919 a8083063 Iustin Pop
  all its children.
2920 a8083063 Iustin Pop

2921 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2922 a8083063 Iustin Pop

2923 a8083063 Iustin Pop
  """
2924 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2925 a8083063 Iustin Pop
    force = True
2926 a8083063 Iustin Pop
  if device.children:
2927 a8083063 Iustin Pop
    for child in device.children:
2928 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(lu, node, instance,
2929 3f78eef2 Iustin Pop
                                        child, force, info):
2930 a8083063 Iustin Pop
        return False
2931 a8083063 Iustin Pop
2932 a8083063 Iustin Pop
  if not force:
2933 a8083063 Iustin Pop
    return True
2934 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
2935 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
2936 72737a7f Iustin Pop
                                       instance.name, False, info)
2937 a8083063 Iustin Pop
  if not new_id:
2938 a8083063 Iustin Pop
    return False
2939 a8083063 Iustin Pop
  if device.physical_id is None:
2940 a8083063 Iustin Pop
    device.physical_id = new_id
2941 a8083063 Iustin Pop
  return True
2942 a8083063 Iustin Pop
2943 a8083063 Iustin Pop
2944 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
2945 923b1523 Iustin Pop
  """Generate a suitable LV name.
2946 923b1523 Iustin Pop

2947 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2948 923b1523 Iustin Pop

2949 923b1523 Iustin Pop
  """
2950 923b1523 Iustin Pop
  results = []
2951 923b1523 Iustin Pop
  for val in exts:
2952 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
2953 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2954 923b1523 Iustin Pop
  return results
2955 923b1523 Iustin Pop
2956 923b1523 Iustin Pop
2957 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Build a DRBD8 disk object together with its two backing LVs.

  The data LV gets the requested size, the metadata LV is fixed at
  128MB; a network port and a shared secret are allocated from the
  cluster configuration.

  """
  port = lu.cfg.AllocatePort()
  vg = lu.cfg.GetVGName()
  secret = lu.cfg.GenerateDRBDSecret()
  # backing store: one LV for the data, one 128MB LV for drbd metadata
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vg, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vg, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port,
                                  p_minor, s_minor, secret),
                      children=[data_lv, meta_lv],
                      iv_name=iv_name)
2976 a1f445d3 Iustin Pop
2977 7c0d6283 Michael Hanselmann
2978 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz,
                          file_storage_dir, file_driver):
  """Generate the entire disk layout for a given template type.

  Builds the sda/sdb disk objects for the instance according to
  'template_name' (diskless, plain LVs, drbd8 pairs or file-backed)
  and returns them as a list.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()

  if template_name == constants.DT_DISKLESS:
    return []

  if template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")
    lv_names = _GenerateUniqueNames(lu, [".sda", ".sdb"])
    return [objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                         logical_id=(vgname, lv_names[0]),
                         iv_name="sda"),
            objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                         logical_id=(vgname, lv_names[1]),
                         iv_name="sdb")]

  if template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # two minors per node: one for each of the two drbd devices
    (minor_pa, minor_pb,
     minor_sa, minor_sb) = lu.cfg.AllocateDRBDMinor(
      [primary_node, primary_node, remote_node, remote_node], instance_name)
    lv_names = _GenerateUniqueNames(lu, [".sda_data", ".sda_meta",
                                         ".sdb_data", ".sdb_meta"])
    sda = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                               disk_sz, lv_names[0:2], "sda",
                               minor_pa, minor_sa)
    sdb = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                               swap_sz, lv_names[2:4], "sdb",
                               minor_pb, minor_sb)
    return [sda, sdb]

  if template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")
    return [objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
                         iv_name="sda",
                         logical_id=(file_driver,
                                     "%s/sda" % file_storage_dir)),
            objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
                         iv_name="sdb",
                         logical_id=(file_driver,
                                     "%s/sdb" % file_storage_dir))]

  raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3033 a8083063 Iustin Pop
3034 a8083063 Iustin Pop
3035 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3036 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3037 3ecf6786 Iustin Pop

3038 3ecf6786 Iustin Pop
  """
3039 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3040 a0c3fea1 Michael Hanselmann
3041 a0c3fea1 Michael Hanselmann
3042 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  For file-based instances the storage directory is created first;
  afterwards every disk is created on all secondary nodes and finally
  on the primary node.  Creation stops at the first failure.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(instance.primary_node,
                                                 file_storage_dir)
    if not result:
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
      return False
    if not result[0]:
      logger.Error("failed to create directory '%s'" % file_storage_dir)
      return False

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
                (disk.iv_name, instance.name))
    #HARDCODE
    for snode in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(lu, snode, instance,
                                        disk, False, info):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
                                    instance, disk, info):
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False

  return True
3087 a8083063 Iustin Pop
3088 a8083063 Iustin Pop
3089 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  Best-effort removal: a failure on one device is logged and the
  removal continues with the remaining ones (compare with
  `_CreateDisks()`).  For file-based instances the storage directory
  is removed as well.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal proces

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_ok = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      if not lu.rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (device.iv_name, node))
        all_ok = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if not lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                               file_storage_dir):
      logger.Error("could not remove directory '%s'" % file_storage_dir)
      all_ok = False

  return all_ok
3124 a8083063 Iustin Pop
3125 a8083063 Iustin Pop
3126 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
  """Return the free space (in MB) required in the volume group.

  The computation assumes the standard two-drive (data + swap) layout;
  templates which do not allocate from the volume group yield None.

  """
  # per-template requirement as a function of data and swap sizes
  if disk_template == constants.DT_PLAIN:
    required = disk_size + swap_size
  elif disk_template == constants.DT_DRBD8:
    # 256 MB are added for drbd metadata, 128MB for each drbd device
    required = disk_size + swap_size + 256
  elif disk_template in (constants.DT_DISKLESS, constants.DT_FILE):
    # these templates do not consume volume group space
    required = None
  else:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)
  return required
3146 e2fe6369 Iustin Pop
3147 e2fe6369 Iustin Pop
3148 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Allocates or validates the target node(s), creates the instance's
  disks, adds the instance to the cluster configuration and optionally
  installs an OS on it and starts it up (see Exec below).

  """
  # hook identification for this LU (consumed via BuildHooksEnv)
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present on self.op
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check", "mac"]
  # we compute our own fine-grained locks in ExpandNames instead of
  # taking the big ganeti lock
  REQ_BGL = False
3158 7baf741d Guido Trotter
3159 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Resolve a (possibly shortened) node name to its full form.

    Raises OpPrereqError if the name matches no known node.

    """
    expanded = self.cfg.ExpandNodeName(node)
    if expanded is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return expanded
3167 7baf741d Guido Trotter
3168 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation, and do the cheap
    (configuration-light) validation of the opcode parameters.

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["kernel_path", "initrd_path", "pnode", "snode",
                 "iallocator", "hvm_boot_order", "hvm_acpi", "hvm_pae",
                 "hvm_cdrom_image_path", "hvm_nic_type", "hvm_disk_type",
                 "vnc_bind_address", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # default to the cluster-wide hypervisor when none was requested
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    enabled_hvs = self.cfg.GetClusterInfo().enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # ip validity checks
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      # "auto" means: use the IP the instance name resolves to
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = self.op.ip = inst_ip
    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # MAC address verification
    if self.op.mac != "auto":
      if not utils.IsValidMac(self.op.mac.lower()):
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
                                   self.op.mac)

    # boot order verification
    if self.op.hvm_boot_order is not None:
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
        raise errors.OpPrereqError("invalid boot order specified,"
                                   " must be one or more of [acdn]")
    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    # the given directory is later joined onto the cluster-wide file
    # storage dir (see Exec), so an absolute path cannot be accepted;
    # the error message used to wrongly say "not absolute"
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path must be"
                                 " relative, not absolute")

    ### Node/iallocator related checks
    # exactly one of iallocator/pnode must be given
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator may pick any node, so we must lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      self.op.src_node = src_node = self._ExpandNode(src_node)
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        self.needed_locks[locking.LEVEL_NODE].append(src_node)

    else: # INSTANCE_CREATE
      # a fresh install needs to know which OS to put on the instance
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
3288 a8083063 Iustin Pop
3289 538475ca Iustin Pop
  def _RunAllocator(self):
    """Select the target node(s) via the configured instance allocator.

    On success this sets self.op.pnode, and self.op.snode when the
    allocator was asked for two nodes.

    """
    disk_specs = [{"size": self.op.disk_size, "mode": "w"},
                  {"size": self.op.swap_size, "mode": "w"}]
    nic_specs = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
                  "bridge": self.op.bridge}]

    allocator = IAllocator(self,
                           mode=constants.IALLOCATOR_MODE_ALLOC,
                           name=self.op.instance_name,
                           disk_template=self.op.disk_template,
                           tags=[],
                           os=self.op.os_type,
                           vcpus=self.op.vcpus,
                           mem_size=self.op.mem_size,
                           disks=disk_specs,
                           nics=nic_specs,
                           )
    allocator.Run(self.op.iallocator)

    if not allocator.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           allocator.info))
    if len(allocator.nodes) != allocator.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(allocator.nodes),
                                  allocator.required_nodes))

    # first node is always the primary
    self.op.pnode = allocator.nodes[0]
    logger.ToStdout("Selected nodes for the instance: %s" %
                    (", ".join(allocator.nodes),))
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
                (self.op.instance_name, self.op.iallocator, allocator.nodes))
    if allocator.required_nodes == 2:
      self.op.snode = allocator.nodes[1]
3327 538475ca Iustin Pop
3328 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    # creation-request specific variables
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    # for imports, also expose where the data comes from
    if self.op.mode == constants.INSTANCE_IMPORT:
      env.update({
        "INSTANCE_SRC_NODE": self.op.src_node,
        "INSTANCE_SRC_PATH": self.op.src_path,
        "INSTANCE_SRC_IMAGE": self.src_image,
        })

    # common per-instance variables
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
                                     primary_node=self.op.pnode,
                                     secondary_nodes=self.secondaries,
                                     status=self.instance_status,
                                     os_type=self.op.os_type,
                                     memory=self.op.mem_size,
                                     vcpus=self.op.vcpus,
                                     nics=[(self.inst_ip, self.op.bridge,
                                            self.op.mac)],
                                     ))

    node_list = ([self.cfg.GetMasterNode(), self.op.pnode] +
                 self.secondaries)
    return env, node_list, node_list
3358 a8083063 Iustin Pop
3359 a8083063 Iustin Pop
3360 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Validates everything needing the cluster configuration or RPC calls
    to the nodes: the export data (for imports), the IP-conflict check,
    the target node(s), free disk space and memory, the OS, the bridge
    and the hypervisor-specific parameters.

    """
    # instances with LVM-backed disks need a volume group on the cluster
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      # export_info behaves like a ConfigParser (has_section/get)
      export_info = self.rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage

    # ip ping checks (we use the same ip that was resolved in ExpandNames)

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # a host answering on the noded port means the IP is already taken
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    # bridge verification: fall back to the cluster default bridge
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    #### allocator run

    # let the iallocator pick pnode (and possibly snode) for us
    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(self.op.snode)

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.op.disk_size, self.op.swap_size)

    # Check lv size requirements (req_size is None for non-LVM templates)
    if req_size is not None:
      nodenames = [pnode.name] + self.secondaries
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo.get(node, None)
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        # vg_free comes from RPC; a non-int value means the node could
        # not compute it
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    # os verification
    os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    if self.op.kernel_path == constants.VALUE_NONE:
      raise errors.OpPrereqError("Can't set instance kernel to none")

    # bridge check on primary node
    if not self.rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.op.mem_size, self.op.hypervisor)

    # hvm_cdrom_image_path verification
    if self.op.hvm_cdrom_image_path is not None:
      # FIXME (als): shouldn't these checks happen on the destination node?
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
                                   " be an absolute path or None, not %s" %
                                   self.op.hvm_cdrom_image_path)
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
                                   " regular file or a symlink pointing to"
                                   " an existing regular file, not %s" %
                                   self.op.hvm_cdrom_image_path)

    # vnc_bind_address verification
    if self.op.vnc_bind_address is not None:
      if not utils.IsValidIP(self.op.vnc_bind_address):
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
                                   " like a valid IP address" %
                                   self.op.vnc_bind_address)

    # Xen HVM device type checks
    if self.op.hypervisor == constants.HT_XEN_HVM:
      if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
        raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
                                   " hypervisor" % self.op.hvm_nic_type)
      if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
        raise errors.OpPrereqError("Invalid disk type %s specified for Xen HVM"
                                   " hypervisor" % self.op.hvm_disk_type)

    # remember the requested initial state for Exec/BuildHooksEnv
    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'
3514 a8083063 Iustin Pop
3515 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the disks, registers the instance in the configuration,
    waits for the disks to sync, installs or imports the OS and
    optionally starts the instance.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # generate a MAC if the user asked for an automatic one
    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    # some hypervisor kinds need a network port allocated (HTS_REQ_PORT)
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    if self.op.vnc_bind_address is None:
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))

    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size,
                                  file_storage_dir,
                                  self.op.file_driver)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            kernel_path=self.op.kernel_path,
                            initrd_path=self.op.initrd_path,
                            hvm_boot_order=self.op.hvm_boot_order,
                            hvm_acpi=self.op.hvm_acpi,
                            hvm_pae=self.op.hvm_pae,
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
                            vnc_bind_address=self.op.vnc_bind_address,
                            hvm_nic_type=self.op.hvm_nic_type,
                            hvm_disk_type=self.op.hvm_disk_type,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self, iobj):
      # roll back any devices that were created before the failure
      _RemoveDisks(self, iobj)
      self.cfg.ReleaseDRBDMinors(instance)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Remove the temp. assignments for the instance's drbds
    self.cfg.ReleaseDRBDMinors(instance)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo the creation and fail the job
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not self.rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        cluster_name = self.cfg.GetClusterName()
        if not self.rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image,
                                                cluster_name):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not self.rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3645 a8083063 Iustin Pop
3646 a8083063 Iustin Pop
3647 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Return the command needed to attach to an instance's console.

  This LU is somewhat special in that it does not act on the instance
  itself: it only computes and returns the ssh command line that must
  be run on the master node in order to reach the console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # ask the primary node which instances it currently runs
    running = self.rpc.call_instance_list([pnode],
                                          [inst.hypervisor])[pnode]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)

    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logger.Debug("connecting to console of %s on %s" % (inst.name, pnode))

    # delegate the actual console command to the hypervisor layer
    hyper = hypervisor.GetHypervisor(inst.hypervisor)
    console_cmd = hyper.GetShellCommandForConsole(inst)

    # build ssh cmdline
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
3693 a8083063 Iustin Pop
3694 a8083063 Iustin Pop
3695 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3696 a8083063 Iustin Pop
  """Replace the disks of an instance.
3697 a8083063 Iustin Pop

3698 a8083063 Iustin Pop
  """
3699 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3700 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3701 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3702 efd990e4 Guido Trotter
  REQ_BGL = False
3703 efd990e4 Guido Trotter
3704 efd990e4 Guido Trotter
  def ExpandNames(self):
    """Lock the instance and work out which node locks are needed.

    Three cases are handled: an iallocator was requested (lock all
    nodes, since the new secondary is not yet known), an explicit new
    secondary was given (lock it in addition to the instance's nodes),
    or neither (only the instance's own nodes are needed).

    """
    self._ExpandAndLockInstance()

    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None

    use_iallocator = getattr(self.op, "iallocator", None) is not None

    if use_iallocator:
      if self.op.remote_node is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
      # the allocator may pick any node, so every node lock is needed
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      expanded = self.cfg.ExpandNodeName(self.op.remote_node)
      if expanded is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = expanded
      # the instance's own nodes get appended in DeclareLocks
      self.needed_locks[locking.LEVEL_NODE] = [expanded]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3727 efd990e4 Guido Trotter
3728 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Late declaration of the instance's node locks.

    Unless we already request every node lock via ALL_SET, the
    instance's primary/secondary nodes must be declared at this level.

    """
    if level != locking.LEVEL_NODE:
      return
    if self.needed_locks[locking.LEVEL_NODE] is locking.ALL_SET:
      return
    self._LockInstancesNodes()
3734 a8083063 Iustin Pop
3735 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    Runs the iallocator named in self.op.iallocator in relocation mode
    and stores the node it selects in self.op.remote_node.

    Raises:
      errors.OpPrereqError: if the allocator run fails or returns an
        unexpected number of nodes.

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # bugfix: the format string has three placeholders but the
      # allocator name was missing from the argument tuple, so this
      # branch raised TypeError instead of the intended OpPrereqError
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    logger.ToStdout("Selected new secondary for the instance: %s" %
                    self.op.remote_node)
3757 b6e82a65 Iustin Pop
3758 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    # the generic per-instance variables take precedence on key clash
    env.update(_BuildInstanceHookEnvByObject(self.instance))

    # hooks run on the master and the primary; the requested new
    # secondary (if any) is added as well
    run_nodes = [self.cfg.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      run_nodes.append(self.op.remote_node)

    return env, run_nodes, run_nodes
3777 a8083063 Iustin Pop
3778 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and, for the DRBD8
    template, computes the target/other/new nodes (self.tgt_node,
    self.oth_node, self.new_node) that the Exec phase operates on.

    """
    # the instance lock is already held, so the info must be there
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # disk replacement only makes sense for network-mirrored templates
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # if an iallocator was requested, let it choose the new secondary;
    # this fills in self.op.remote_node
    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
    if instance.disk_template == constants.DT_DRBD8:
      # REPLACE_DISK_ALL with an explicit new node really means
      # "replace the secondary node"
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # primary replacement: work on pnode, the peer is the secondary
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        # secondary replacement: work on snode, the peer is the primary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # finally, verify every requested disk actually belongs to the instance
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
3849 a8083063 Iustin Pop
3850 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps disk iv_name -> (drbd device, old child LVs, new child LVs)
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node is where LVs are replaced, oth_node is the healthy peer;
    # both were computed in CheckPrereq based on self.op.mode
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    # each requested disk must be visible on both nodes before we touch it
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not self.rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    # the peer must be consistent, otherwise replacing on tgt_node
    # would leave no good copy of the data
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      # one data LV plus one (fixed 128M) metadata LV per disk
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # keep the config objects in sync with the on-node renames
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        # attach failed: best-effort removal of the freshly created LVs
        for new_lv in new_lvs:
          if not self.rpc.call_blockdev_remove(tgt_node, new_lv):
            warning("Can't rollback device %s", hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of the blockdev_find result is the degraded flag
      is_degr = self.rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # removal is best-effort; leftover LVs only waste space
        if not self.rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
4019 a9e0c397 Iustin Pop
4020 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
4021 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
4022 a9e0c397 Iustin Pop

4023 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4024 a9e0c397 Iustin Pop
      - for all disks of the instance:
4025 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
4026 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
4027 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
4028 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
4029 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
4030 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
4031 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
4032 a9e0c397 Iustin Pop
          not network enabled
4033 a9e0c397 Iustin Pop
      - wait for sync across all devices
4034 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
4035 a9e0c397 Iustin Pop

4036 a9e0c397 Iustin Pop
    Failures are not very well handled.
4037 0834c866 Iustin Pop

4038 a9e0c397 Iustin Pop
    """
4039 0834c866 Iustin Pop
    steps_total = 6
4040 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4041 a9e0c397 Iustin Pop
    instance = self.instance
4042 a9e0c397 Iustin Pop
    iv_names = {}
4043 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4044 a9e0c397 Iustin Pop
    # start of work
4045 a9e0c397 Iustin Pop
    cfg = self.cfg
4046 a9e0c397 Iustin Pop
    old_node = self.tgt_node
4047 a9e0c397 Iustin Pop
    new_node = self.new_node
4048 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
4049 0834c866 Iustin Pop
4050 0834c866 Iustin Pop
    # Step: check device activation
4051 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4052 0834c866 Iustin Pop
    info("checking volume groups")
4053 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
4054 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
4055 0834c866 Iustin Pop
    if not results:
4056 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4057 0834c866 Iustin Pop
    for node in pri_node, new_node:
4058 0834c866 Iustin Pop
      res = results.get(node, False)
4059 0834c866 Iustin Pop
      if not res or my_vg not in res:
4060 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4061 0834c866 Iustin Pop
                                 (my_vg, node))
4062 0834c866 Iustin Pop
    for dev in instance.disks:
4063 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4064 0834c866 Iustin Pop
        continue
4065 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
4066 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4067 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4068 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
4069 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
4070 0834c866 Iustin Pop
4071 0834c866 Iustin Pop
    # Step: check other node consistency
4072 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4073 0834c866 Iustin Pop
    for dev in instance.disks:
4074 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4075 0834c866 Iustin Pop
        continue
4076 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
4077 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4078 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4079 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4080 0834c866 Iustin Pop
                                 pri_node)
4081 0834c866 Iustin Pop
4082 0834c866 Iustin Pop
    # Step: create new storage
4083 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4084 468b46f9 Iustin Pop
    for dev in instance.disks:
4085 a9e0c397 Iustin Pop
      size = dev.size
4086 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
4087 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4088 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4089 a9e0c397 Iustin Pop
      # are talking about the secondary node
4090 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4091 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
4092 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4093 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4094 a9e0c397 Iustin Pop
                                   " node '%s'" %
4095 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4096 a9e0c397 Iustin Pop
4097 0834c866 Iustin Pop
4098 468b46f9 Iustin Pop
    # Step 4: dbrd minors and drbd setups changes
4099 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
4100 a1578d63 Iustin Pop
    # error and the success paths
4101 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4102 a1578d63 Iustin Pop
                                   instance.name)
4103 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
4104 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4105 468b46f9 Iustin Pop
    for dev, new_minor in zip(instance.disks, minors):
4106 0834c866 Iustin Pop
      size = dev.size
4107 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
4108 a9e0c397 Iustin Pop
      # create new devices on new_node
4109 ffa1c0dc Iustin Pop
      if pri_node == dev.logical_id[0]:
4110 ffa1c0dc Iustin Pop
        new_logical_id = (pri_node, new_node,
4111 f9518d38 Iustin Pop
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4112 f9518d38 Iustin Pop
                          dev.logical_id[5])
4113 ffa1c0dc Iustin Pop
      else:
4114 ffa1c0dc Iustin Pop
        new_logical_id = (new_node, pri_node,
4115 f9518d38 Iustin Pop
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4116 f9518d38 Iustin Pop
                          dev.logical_id[5])
4117 468b46f9 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children, new_logical_id)
4118 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4119 a1578d63 Iustin Pop
                    new_logical_id)
4120 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4121 ffa1c0dc Iustin Pop
                              logical_id=new_logical_id,
4122 a9e0c397 Iustin Pop
                              children=dev.children)
4123 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(self, new_node, instance,
4124 3f78eef2 Iustin Pop
                                        new_drbd, False,
4125 b9bddb6b Iustin Pop
                                        _GetInstanceInfoText(instance)):
4126 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4127 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4128 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4129 a9e0c397 Iustin Pop
4130 0834c866 Iustin Pop
    for dev in instance.disks:
4131 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4132 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
4133 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4134 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_shutdown(old_node, dev):
4135 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
4136 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4137 a9e0c397 Iustin Pop
4138 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4139 642445d9 Iustin Pop
    done = 0
4140 642445d9 Iustin Pop
    for dev in instance.disks:
4141 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4142 f9518d38 Iustin Pop
      # set the network part of the physical (unique in bdev terms) id
4143 f9518d38 Iustin Pop
      # to None, meaning detach from network
4144 f9518d38 Iustin Pop
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
4145 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4146 642445d9 Iustin Pop
      # standalone state
4147 72737a7f Iustin Pop
      if self.rpc.call_blockdev_find(pri_node, dev):
4148 642445d9 Iustin Pop
        done += 1
4149 642445d9 Iustin Pop
      else:
4150 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
4151 642445d9 Iustin Pop
                dev.iv_name)
4152 642445d9 Iustin Pop
4153 642445d9 Iustin Pop
    if not done:
4154 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4155 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
4156 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4157 642445d9 Iustin Pop
4158 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4159 642445d9 Iustin Pop
    # the instance to point to the new secondary
4160 642445d9 Iustin Pop
    info("updating instance configuration")
4161 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
4162 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
4163 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4164 642445d9 Iustin Pop
    cfg.Update(instance)
4165 a1578d63 Iustin Pop
    # we can remove now the temp minors as now the new values are
4166 a1578d63 Iustin Pop
    # written to the config file (and therefore stable)
4167 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance.name)
4168 a9e0c397 Iustin Pop
4169 642445d9 Iustin Pop
    # and now perform the drbd attach
4170 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4171 642445d9 Iustin Pop
    failures = []
4172 642445d9 Iustin Pop
    for dev in instance.disks:
4173 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
4174 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4175 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4176 642445d9 Iustin Pop
      # is correct
4177 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4178 ffa1c0dc Iustin Pop
      logging.debug("Disk to attach: %s", dev)
4179 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4180 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
4181 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4182 a9e0c397 Iustin Pop
4183 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4184 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4185 a9e0c397 Iustin Pop
    # return value
4186 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4187 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4188 a9e0c397 Iustin Pop
4189 a9e0c397 Iustin Pop
    # so check manually all the devices
4190 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4191 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4192 72737a7f Iustin Pop
      is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5]
4193 a9e0c397 Iustin Pop
      if is_degr:
4194 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4195 a9e0c397 Iustin Pop
4196 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4197 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4198 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
4199 a9e0c397 Iustin Pop
      for lv in old_lvs:
4200 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4201 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_remove(old_node, lv):
4202 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4203 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4204 a9e0c397 Iustin Pop
4205 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    Dispatches to the DRBD8 handler matching the request: replacement on
    the same nodes, or migration to a new secondary node.

    """
    inst = self.instance

    # a down instance has inactive disks; bring them up for the duration
    # of the replacement
    if inst.status == "down":
      _StartInstanceDisks(self, inst, True)

    if inst.disk_template != constants.DT_DRBD8:
      raise errors.ProgrammerError("Unhandled disk replacement case")

    if self.op.remote_node is not None:
      handler = self._ExecD8Secondary
    else:
      handler = self._ExecD8DiskOnly

    result = handler(feedback_fn)

    # return the disks of a down instance to their inactive state
    if inst.status == "down":
      _SafeShutdownInstanceDisks(self, inst)

    return result
4233 a8083063 Iustin Pop
4234 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # the node locks are computed later, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    inst = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert inst is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = inst

    # only LVM-backed templates can be grown
    if inst.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    if inst.FindDisk(self.op.disk) is None:
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                 (self.op.disk, inst.name))

    # every node holding a copy of the disk needs enough free VG space
    nodenames = [inst.primary_node] + list(inst.secondary_nodes)
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       inst.hypervisor)
    for node in nodenames:
      ninfo = nodeinfo.get(node, None)
      if not ninfo:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = ninfo.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > ninfo['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, ninfo['vg_free'], self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    inst = self.instance
    disk = inst.FindDisk(self.op.disk)
    # grow on the secondaries first, then on the primary node
    for node in inst.secondary_nodes + (inst.primary_node,):
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      # the RPC must return a (status, payload) pair
      if (not result or not isinstance(result, (list, tuple)) or
          len(result) != 2):
        raise errors.OpExecError("grow request failed to node %s" % node)
      if not result[0]:
        raise errors.OpExecError("grow request failed to node %s: %s" %
                                 (node, result[1]))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(inst)
    return
4325 8729e0d7 Iustin Pop
4326 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  For each requested instance (or all instances, if none are named),
  returns a dict of configuration and runtime state, including the
  per-device disk status on the primary and secondary nodes.

  """
  _OP_REQP = ["instances"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # all our locks are acquired in shared mode: we only read data
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          # report 'name': this opcode has no 'instance_name' field, so the
          # previous "self.op.instance_name" raised AttributeError instead
          # of the intended error
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # no names given: query all instances
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # querying all instances: the instance locks we hold are the list
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing device 'dev' of 'instance', querying the
    primary node and, when one applies, the secondary node 'snode' (for
    DRBD devices the secondary is taken from the device's logical_id
    instead); children are described recursively.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      # the hypervisor reports state only for running instances
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "vcpus": instance.vcpus,
        "hypervisor": instance.hypervisor,
        }

      # add the hypervisor-specific fields
      htkind = instance.hypervisor
      if htkind == constants.HT_XEN_PVM:
        idict["kernel_path"] = instance.kernel_path
        idict["initrd_path"] = instance.initrd_path

      if htkind == constants.HT_XEN_HVM:
        idict["hvm_boot_order"] = instance.hvm_boot_order
        idict["hvm_acpi"] = instance.hvm_acpi
        idict["hvm_pae"] = instance.hvm_pae
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
        idict["hvm_nic_type"] = instance.hvm_nic_type
        idict["hvm_disk_type"] = instance.hvm_disk_type

      if htkind in constants.HTS_REQ_PORT:
        if instance.vnc_bind_address is None:
          vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
        else:
          vnc_bind_address = instance.vnc_bind_address
        # build a human-readable console location string
        if instance.network_port is None:
          vnc_console_port = None
        elif vnc_bind_address == constants.BIND_ADDRESS_GLOBAL:
          vnc_console_port = "%s:%s" % (instance.primary_node,
                                        instance.network_port)
        elif vnc_bind_address == constants.LOCALHOST_IP_ADDRESS:
          vnc_console_port = "%s:%s on node %s" % (vnc_bind_address,
                                                   instance.network_port,
                                                   instance.primary_node)
        else:
          vnc_console_port = "%s:%s" % (instance.vnc_bind_address,
                                        instance.network_port)
        idict["vnc_console_port"] = vnc_console_port
        idict["vnc_bind_address"] = vnc_bind_address
        idict["network_port"] = instance.network_port

      result[instance.name] = idict

    return result
4481 a8083063 Iustin Pop
4482 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4483 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4484 a8083063 Iustin Pop

4485 a8083063 Iustin Pop
  """
4486 a8083063 Iustin Pop
  HPATH = "instance-modify"
4487 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4488 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4489 1a5c7281 Guido Trotter
  REQ_BGL = False
4490 1a5c7281 Guido Trotter
4491 1a5c7281 Guido Trotter
  def ExpandNames(self):
4492 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
4493 a8083063 Iustin Pop
4494 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4495 a8083063 Iustin Pop
    """Build hooks env.
4496 a8083063 Iustin Pop

4497 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4498 a8083063 Iustin Pop

4499 a8083063 Iustin Pop
    """
4500 396e1b78 Michael Hanselmann
    args = dict()
4501 a8083063 Iustin Pop
    if self.mem:
4502 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
4503 a8083063 Iustin Pop
    if self.vcpus:
4504 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
4505 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4506 396e1b78 Michael Hanselmann
      if self.do_ip:
4507 396e1b78 Michael Hanselmann
        ip = self.ip
4508 396e1b78 Michael Hanselmann
      else:
4509 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4510 396e1b78 Michael Hanselmann
      if self.bridge:
4511 396e1b78 Michael Hanselmann
        bridge = self.bridge
4512 396e1b78 Michael Hanselmann
      else:
4513 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4514 ef756965 Iustin Pop
      if self.mac:
4515 ef756965 Iustin Pop
        mac = self.mac
4516 ef756965 Iustin Pop
      else:
4517 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4518 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4519 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
4520 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(),
4521 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4522 a8083063 Iustin Pop
    return env, nl, nl
4523 a8083063 Iustin Pop
4524 a8083063 Iustin Pop
  def CheckPrereq(self):
4525 a8083063 Iustin Pop
    """Check prerequisites.
4526 a8083063 Iustin Pop

4527 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4528 a8083063 Iustin Pop

4529 a8083063 Iustin Pop
    """
4530 1a5c7281 Guido Trotter
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
4531 1a5c7281 Guido Trotter
    # a separate CheckArguments function, if we implement one, so the operation
4532 1a5c7281 Guido Trotter
    # can be aborted without waiting for any lock, should it have an error...
4533 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
4534 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
4535 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4536 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4537 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4538 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4539 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4540 25c5878d Alexander Schreiber
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
4541 31a853d2 Iustin Pop
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
4542 31a853d2 Iustin Pop
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
4543 5397e0b7 Alexander Schreiber
    self.hvm_nic_type = getattr(self.op, "hvm_nic_type", None)
4544 5397e0b7 Alexander Schreiber
    self.hvm_disk_type = getattr(self.op, "hvm_disk_type", None)
4545 31a853d2 Iustin Pop
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
4546 31a853d2 Iustin Pop
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
4547 4300c4b6 Guido Trotter
    self.force = getattr(self.op, "force", None)
4548 31a853d2 Iustin Pop
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
4549 31a853d2 Iustin Pop
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
4550 31a853d2 Iustin Pop
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
4551 5397e0b7 Alexander Schreiber
                 self.vnc_bind_address, self.hvm_nic_type, self.hvm_disk_type]
4552 31a853d2 Iustin Pop
    if all_parms.count(None) == len(all_parms):
4553 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4554 a8083063 Iustin Pop
    if self.mem is not None:
4555 a8083063 Iustin Pop
      try:
4556 a8083063 Iustin Pop
        self.mem = int(self.mem)
4557 a8083063 Iustin Pop
      except ValueError, err:
4558 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
4559 a8083063 Iustin Pop
    if self.vcpus is not None:
4560 a8083063 Iustin Pop
      try:
4561 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
4562 a8083063 Iustin Pop
      except ValueError, err:
4563 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
4564 a8083063 Iustin Pop
    if self.ip is not None:
4565 a8083063 Iustin Pop
      self.do_ip = True
4566 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4567 a8083063 Iustin Pop
        self.ip = None
4568 a8083063 Iustin Pop
      else:
4569 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4570 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4571 a8083063 Iustin Pop
    else:
4572 a8083063 Iustin Pop
      self.do_ip = False
4573 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4574 1862d460 Alexander Schreiber
    if self.mac is not None:
4575 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4576 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4577 1862d460 Alexander Schreiber
                                   self.mac)
4578 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4579 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4580 a8083063 Iustin Pop
4581 973d7867 Iustin Pop
    if self.kernel_path is not None:
4582 973d7867 Iustin Pop
      self.do_kernel_path = True
4583 973d7867 Iustin Pop
      if self.kernel_path == constants.VALUE_NONE:
4584 973d7867 Iustin Pop
        raise errors.OpPrereqError("Can't set instance to no kernel")
4585 973d7867 Iustin Pop
4586 973d7867 Iustin Pop
      if self.kernel_path != constants.VALUE_DEFAULT:
4587 973d7867 Iustin Pop
        if not os.path.isabs(self.kernel_path):
4588 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The kernel path must be an absolute"
4589 973d7867 Iustin Pop
                                    " filename")
4590 8cafeb26 Iustin Pop
    else:
4591 8cafeb26 Iustin Pop
      self.do_kernel_path = False
4592 973d7867 Iustin Pop
4593 973d7867 Iustin Pop
    if self.initrd_path is not None:
4594 973d7867 Iustin Pop
      self.do_initrd_path = True
4595 973d7867 Iustin Pop
      if self.initrd_path not in (constants.VALUE_NONE,
4596 973d7867 Iustin Pop
                                  constants.VALUE_DEFAULT):
4597 2bc22872 Iustin Pop
        if not os.path.isabs(self.initrd_path):
4598 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The initrd path must be an absolute"
4599 973d7867 Iustin Pop
                                    " filename")
4600 8cafeb26 Iustin Pop
    else:
4601 8cafeb26 Iustin Pop
      self.do_initrd_path = False
4602 973d7867 Iustin Pop
4603 25c5878d Alexander Schreiber
    # boot order verification
4604 25c5878d Alexander Schreiber
    if self.hvm_boot_order is not None:
4605 25c5878d Alexander Schreiber
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
4606 25c5878d Alexander Schreiber
        if len(self.hvm_boot_order.strip("acdn")) != 0:
4607 25c5878d Alexander Schreiber
          raise errors.OpPrereqError("invalid boot order specified,"
4608 25c5878d Alexander Schreiber
                                     " must be one or more of [acdn]"
4609 25c5878d Alexander Schreiber
                                     " or 'default'")
4610 25c5878d Alexander Schreiber
4611 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
4612 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
4613 3fc175f0 Alexander Schreiber
      if not (os.path.isabs(self.op.hvm_cdrom_image_path) or
4614 3fc175f0 Alexander Schreiber
              self.op.hvm_cdrom_image_path.lower() == "none"):
4615 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
4616 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
4617 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4618 3fc175f0 Alexander Schreiber
      if not (os.path.isfile(self.op.hvm_cdrom_image_path) or
4619 3fc175f0 Alexander Schreiber
              self.op.hvm_cdrom_image_path.lower() == "none"):
4620 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
4621 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
4622 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
4623 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4624 31a853d2 Iustin Pop
4625 31a853d2 Iustin Pop
    # vnc_bind_address verification
4626 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
4627 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
4628 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
4629 31a853d2 Iustin Pop
                                   " like a valid IP address" %
4630 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
4631 31a853d2 Iustin Pop
4632 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4633 1a5c7281 Guido Trotter
    assert self.instance is not None, \
4634 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4635 cfefe007 Guido Trotter
    self.warn = []
4636 cfefe007 Guido Trotter
    if self.mem is not None and not self.force:
4637 cfefe007 Guido Trotter
      pnode = self.instance.primary_node
4638 cfefe007 Guido Trotter
      nodelist = [pnode]
4639 cfefe007 Guido Trotter
      nodelist.extend(instance.secondary_nodes)
4640 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
4641 72737a7f Iustin Pop
                                                  instance.hypervisor)
4642 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
4643 72737a7f Iustin Pop
                                         instance.hypervisor)
4644 cfefe007 Guido Trotter
4645 cfefe007 Guido Trotter
      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
4646 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
4647 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
4648 cfefe007 Guido Trotter
      else:
4649 cfefe007 Guido Trotter
        if instance_info:
4650 cfefe007 Guido Trotter
          current_mem = instance_info['memory']
4651 cfefe007 Guido Trotter
        else:
4652 cfefe007 Guido Trotter
          # Assume instance not running
4653 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
4654 cfefe007 Guido Trotter
          # and we have no other way to check)
4655 cfefe007 Guido Trotter
          current_mem = 0
4656 cfefe007 Guido Trotter
        miss_mem = self.mem - current_mem - nodeinfo[pnode]['memory_free']
4657 cfefe007 Guido Trotter
        if miss_mem > 0:
4658 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
4659 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
4660 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
4661 cfefe007 Guido Trotter
4662 cfefe007 Guido Trotter
      for node in instance.secondary_nodes:
4663 cfefe007 Guido Trotter
        if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
4664 cfefe007 Guido Trotter
          self.warn.append("Can't get info from secondary node %s" % node)
4665 cfefe007 Guido Trotter
        elif self.mem > nodeinfo[node]['memory_free']:
4666 cfefe007 Guido Trotter
          self.warn.append("Not enough memory to failover instance to secondary"
4667 cfefe007 Guido Trotter
                           " node %s" % node)
4668 cfefe007 Guido Trotter
4669 5bc84f33 Alexander Schreiber
    # Xen HVM device type checks
4670 00cd937c Iustin Pop
    if instance.hypervisor == constants.HT_XEN_HVM:
4671 5bc84f33 Alexander Schreiber
      if self.op.hvm_nic_type is not None:
4672 5bc84f33 Alexander Schreiber
        if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
4673 5bc84f33 Alexander Schreiber
          raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
4674 5bc84f33 Alexander Schreiber
                                     " HVM  hypervisor" % self.op.hvm_nic_type)
4675 5bc84f33 Alexander Schreiber
      if self.op.hvm_disk_type is not None:
4676 5bc84f33 Alexander Schreiber
        if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
4677 5bc84f33 Alexander Schreiber
          raise errors.OpPrereqError("Invalid disk type %s specified for Xen"
4678 5bc84f33 Alexander Schreiber
                                     " HVM hypervisor" % self.op.hvm_disk_type)
4679 5bc84f33 Alexander Schreiber
4680 a8083063 Iustin Pop
    return
4681 a8083063 Iustin Pop
4682 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.
    """
    # CheckPrereq collected warnings but had no feedback_fn available
    # there, so emit them to the user now.
    for message in self.warn:
      feedback_fn("WARNING: %s" % message)

    changed = []
    instance = self.instance

    if self.mem:
      instance.memory = self.mem
      changed.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      changed.append(("vcpus", self.vcpus))
    # NIC-level parameters apply to the first NIC only
    if self.do_ip:
      instance.nics[0].ip = self.ip
      changed.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      changed.append(("bridge", self.bridge))
    if self.mac:
      instance.nics[0].mac = self.mac
      changed.append(("mac", self.mac))
    # kernel/initrd paths use explicit do_* flags since the value itself
    # may legitimately be false-ish
    if self.do_kernel_path:
      instance.kernel_path = self.kernel_path
      changed.append(("kernel_path", self.kernel_path))
    if self.do_initrd_path:
      instance.initrd_path = self.initrd_path
      changed.append(("initrd_path", self.initrd_path))
    if self.hvm_boot_order:
      new_order = self.hvm_boot_order
      if new_order == constants.VALUE_DEFAULT:
        # "default" reverts to the hypervisor's builtin boot ordering
        new_order = None
      instance.hvm_boot_order = new_order
      # report the user-supplied value, not the translated one
      changed.append(("hvm_boot_order", self.hvm_boot_order))
    # These four HVM parameters may legitimately carry a false value
    # (e.g. acpi/pae disabled), so test against None instead of truth.
    for param in ("hvm_acpi", "hvm_pae", "hvm_nic_type", "hvm_disk_type"):
      new_value = getattr(self, param)
      if new_value is not None:
        setattr(instance, param, new_value)
        changed.append((param, new_value))
    if self.hvm_cdrom_image_path:
      new_path = self.hvm_cdrom_image_path
      if new_path == constants.VALUE_NONE:
        # "none" ejects the virtual CDROM
        new_path = None
      instance.hvm_cdrom_image_path = new_path
      # again, report what the user asked for
      changed.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
    if self.vnc_bind_address:
      instance.vnc_bind_address = self.vnc_bind_address
      changed.append(("vnc_bind_address", self.vnc_bind_address))

    # persist the modified instance object in the cluster configuration
    self.cfg.Update(instance)

    return changed
4746 a8083063 Iustin Pop
4747 a8083063 Iustin Pop
4748 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    # listing exports is read-only, so shared node locks are enough
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      # no explicit node list given: query every node in the cluster
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the node list to query is exactly the set of node locks we hold
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return self.rpc.call_export_list(self.nodes)
4780 a8083063 Iustin Pop
4781 a8083063 Iustin Pop
4782 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true: for now we have to lock all nodes, as we don't know
    # where the previous export might live, and in this LU we search for
    # it and remove it from its current node. Possible future fixes:
    #  - a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, afterwards
    #  - dropping the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # every node is locked already, so there is nothing to declare here

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    hook_nodes = [self.cfg.GetMasterNode(),
                  self.instance.primary_node,
                  self.op.target_node]
    return env, hook_nodes, hook_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

    dst_name = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_name)
    assert self.dst_node is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

    # exporting file-based disks is not supported
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node

    if self.op.shutdown:
      # stop the instance itself; its disks stay active for snapshotting
      if not self.rpc.call_instance_shutdown(src_node, instance):
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()
    snap_disks = []

    try:
      for disk in instance.disks:
        if disk.iv_name != "sda":
          continue
        # snap_name will be a snapshot of an lvm leaf of the disk we passed
        snap_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if not snap_name:
          # best-effort: log and carry on without this disk's snapshot
          logger.Error("could not snapshot block device %s on node %s" %
                       (disk.logical_id[1], src_node))
        else:
          snap_disks.append(objects.Disk(dev_type=constants.LD_LV,
                                         size=disk.size,
                                         logical_id=(vgname, snap_name),
                                         physical_id=(vgname, snap_name),
                                         iv_name=disk.iv_name))
    finally:
      # restart the instance even if snapshotting failed
      if self.op.shutdown and instance.status == "up":
        if not self.rpc.call_instance_start(src_node, instance, None):
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    # copy each snapshot to the target node, then drop it from the source
    cluster_name = self.cfg.GetClusterName()
    for dev in snap_disks:
      if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                           instance, cluster_name):
        logger.Error("could not export block device %s from node %s to node %s"
                     % (dev.logical_id[1], src_node, dst_node.name))
      if not self.rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from node %s" %
                     (dev.logical_id[1], src_node))

    if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # On one-node clusters nodelist is now empty; if we proceeded, the
    # fresh backup would be removed too, because OpQueryExports
    # substitutes an empty node list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if instance.name not in exportlist[node]:
          continue
        if not self.rpc.call_export_remove(node, instance.name):
          logger.Error("could not remove older export for instance %s"
                       " on node %s" % (instance.name, node))
4913 5c947f38 Iustin Pop
4914 5c947f38 Iustin Pop
4915 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # RemoveExport needs every node locked, but not the instance itself:
    # nothing happens to the instance, and exports belonging to an
    # already-removed instance must be removable too.
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    # Resolve the instance name; if unknown, fall back to the literal name
    # passed by the user, which only works if it was an FQDN.
    fqdn_warn = False
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if not instance_name:
      instance_name = self.op.instance_name
      fqdn_warn = True

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      if instance_name not in exportlist[node]:
        continue
      found = True
      if not self.rpc.call_export_remove(node, instance_name):
        logger.Error("could not remove export for instance %s"
                     " on node %s" % (instance_name, node))

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
4960 9ac99fda Guido Trotter
4961 9ac99fda Guido Trotter
4962 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    kind = self.op.kind
    if kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded
    # cluster-level tags need neither locks nor name expansion

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # resolve the tag target object from the configuration
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4999 5c947f38 Iustin Pop
5000 5c947f38 Iustin Pop
5001 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq; hand the tags back
    # as a plain list
    tags = self.target.GetTags()
    return list(tags)
5013 5c947f38 Iustin Pop
5014 5c947f38 Iustin Pop
5015 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
5016 73415719 Iustin Pop
  """Searches the tags for a given pattern.
5017 73415719 Iustin Pop

5018 73415719 Iustin Pop
  """
5019 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
5020 8646adce Guido Trotter
  REQ_BGL = False
5021 8646adce Guido Trotter
5022 8646adce Guido Trotter
  def ExpandNames(self):
5023 8646adce Guido Trotter
    self.needed_locks = {}
5024 73415719 Iustin Pop
5025 73415719 Iustin Pop
  def CheckPrereq(self):
5026 73415719 Iustin Pop
    """Check prerequisites.
5027 73415719 Iustin Pop

5028 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
5029 73415719 Iustin Pop

5030 73415719 Iustin Pop
    """
5031 73415719 Iustin Pop
    try:
5032 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
5033 73415719 Iustin Pop
    except re.error, err:
5034 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
5035 73415719 Iustin Pop
                                 (self.op.pattern, err))
5036 73415719 Iustin Pop
5037 73415719 Iustin Pop
  def Exec(self, feedback_fn):
5038 73415719 Iustin Pop
    """Returns the tag list.
5039 73415719 Iustin Pop

5040 73415719 Iustin Pop
    """
5041 73415719 Iustin Pop
    cfg = self.cfg
5042 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
5043 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
5044 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
5045 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
5046 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
5047 73415719 Iustin Pop
    results = []
5048 73415719 Iustin Pop
    for path, target in tgts:
5049 73415719 Iustin Pop
      for tag in target.GetTags():
5050 73415719 Iustin Pop
        if self.re.search(tag):
5051 73415719 Iustin Pop
          results.append((path, tag))
5052 73415719 Iustin Pop
    return results
5053 73415719 Iustin Pop
5054 73415719 Iustin Pop
5055 f27302fa Iustin Pop
class LUAddTags(TagsLU):
5056 5c947f38 Iustin Pop
  """Sets a tag on a given object.
5057 5c947f38 Iustin Pop

5058 5c947f38 Iustin Pop
  """
5059 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5060 8646adce Guido Trotter
  REQ_BGL = False
5061 5c947f38 Iustin Pop
5062 5c947f38 Iustin Pop
  def CheckPrereq(self):
5063 5c947f38 Iustin Pop
    """Check prerequisites.
5064 5c947f38 Iustin Pop

5065 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
5066 5c947f38 Iustin Pop

5067 5c947f38 Iustin Pop
    """
5068 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5069 f27302fa Iustin Pop
    for tag in self.op.tags:
5070 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5071 5c947f38 Iustin Pop
5072 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5073 5c947f38 Iustin Pop
    """Sets the tag.
5074 5c947f38 Iustin Pop

5075 5c947f38 Iustin Pop
    """
5076 5c947f38 Iustin Pop
    try:
5077 f27302fa Iustin Pop
      for tag in self.op.tags:
5078 f27302fa Iustin Pop
        self.target.AddTag(tag)
5079 5c947f38 Iustin Pop
    except errors.TagError, err:
5080 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
5081 5c947f38 Iustin Pop
    try:
5082 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5083 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5084 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5085 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5086 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5087 5c947f38 Iustin Pop
5088 5c947f38 Iustin Pop
5089 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    # every tag to be removed must currently be present on the target
    if not del_tags <= cur_tags:
      missing = sorted("'%s'" % tag for tag in del_tags - cur_tags)
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(missing)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for old_tag in self.op.tags:
      self.target.RemoveTag(old_tag)
    # persist the change; a concurrent config modification aborts the job
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
5126 06009e27 Iustin Pop
5127 0eed6e61 Guido Trotter
5128 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to
      # use this way in ExpandNames. Check the LogicalUnit.ExpandNames
      # docstring for more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      # sleep locally on the master node
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      # fan the delay out to the requested nodes via RPC
      node_results = self.rpc.call_test_delay(self.op.on_nodes,
                                              self.op.duration)
      if not node_results:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, delay_ok in node_results.items():
        if not delay_ok:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, delay_ok))
5172 d61df03e Iustin Pop
5173 d61df03e Iustin Pop
5174 d1c2dd75 Iustin Pop
class IAllocator(object):
5175 d1c2dd75 Iustin Pop
  """IAllocator framework.
5176 d61df03e Iustin Pop

5177 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
5178 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
5179 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
5180 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
5181 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
5182 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
5183 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
5184 d1c2dd75 Iustin Pop
      easy usage
5185 d61df03e Iustin Pop

5186 d61df03e Iustin Pop
  """
5187 29859cb7 Iustin Pop
  _ALLO_KEYS = [
5188 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
5189 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
5190 d1c2dd75 Iustin Pop
    ]
5191 29859cb7 Iustin Pop
  _RELO_KEYS = [
5192 29859cb7 Iustin Pop
    "relocate_from",
5193 29859cb7 Iustin Pop
    ]
5194 d1c2dd75 Iustin Pop
5195 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
5196 72737a7f Iustin Pop
    self.lu = lu
5197 d1c2dd75 Iustin Pop
    # init buffer variables
5198 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
5199 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
5200 29859cb7 Iustin Pop
    self.mode = mode
5201 29859cb7 Iustin Pop
    self.name = name
5202 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
5203 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
5204 29859cb7 Iustin Pop
    self.relocate_from = None
5205 27579978 Iustin Pop
    # computed fields
5206 27579978 Iustin Pop
    self.required_nodes = None
5207 d1c2dd75 Iustin Pop
    # init result fields
5208 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
5209 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5210 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
5211 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
5212 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
5213 29859cb7 Iustin Pop
    else:
5214 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
5215 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
5216 d1c2dd75 Iustin Pop
    for key in kwargs:
5217 29859cb7 Iustin Pop
      if key not in keyset:
5218 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
5219 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5220 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
5221 29859cb7 Iustin Pop
    for key in keyset:
5222 d1c2dd75 Iustin Pop
      if key not in kwargs:
5223 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
5224 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5225 d1c2dd75 Iustin Pop
    self._BuildInputData()
5226 d1c2dd75 Iustin Pop
5227 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
5228 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
5229 d1c2dd75 Iustin Pop

5230 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
5231 d1c2dd75 Iustin Pop

5232 d1c2dd75 Iustin Pop
    """
5233 72737a7f Iustin Pop
    cfg = self.lu.cfg
5234 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
5235 d1c2dd75 Iustin Pop
    # cluster data
5236 d1c2dd75 Iustin Pop
    data = {
5237 d1c2dd75 Iustin Pop
      "version": 1,
5238 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
5239 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
5240 e69d05fd Iustin Pop
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
5241 d1c2dd75 Iustin Pop
      # we don't have job IDs
5242 d61df03e Iustin Pop
      }
5243 d61df03e Iustin Pop
5244 6286519f Iustin Pop
    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]
5245 6286519f Iustin Pop
5246 d1c2dd75 Iustin Pop
    # node data
5247 d1c2dd75 Iustin Pop
    node_results = {}
5248 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
5249 e69d05fd Iustin Pop
    # FIXME: here we have only one hypervisor information, but
5250 e69d05fd Iustin Pop
    # instance can belong to different hypervisors
5251 72737a7f Iustin Pop
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
5252 72737a7f Iustin Pop
                                           cfg.GetHypervisorType())
5253 d1c2dd75 Iustin Pop
    for nname in node_list:
5254 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
5255 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
5256 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
5257 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
5258 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
5259 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
5260 d1c2dd75 Iustin Pop
        if attr not in remote_info:
5261 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
5262 d1c2dd75 Iustin Pop
                                   (nname, attr))
5263 d1c2dd75 Iustin Pop
        try:
5264 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
5265 d1c2dd75 Iustin Pop
        except ValueError, err:
5266 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
5267 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
5268 6286519f Iustin Pop
      # compute memory used by primary instances
5269 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
5270 6286519f Iustin Pop
      for iinfo in i_list:
5271 6286519f Iustin Pop
        if iinfo.primary_node == nname:
5272 6286519f Iustin Pop
          i_p_mem += iinfo.memory
5273 6286519f Iustin Pop
          if iinfo.status == "up":
5274 6286519f Iustin Pop
            i_p_up_mem += iinfo.memory
5275 6286519f Iustin Pop
5276 b2662e7f Iustin Pop
      # compute memory used by instances
5277 d1c2dd75 Iustin Pop
      pnr = {
5278 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
5279 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
5280 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
5281 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
5282 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
5283 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
5284 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
5285 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
5286 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
5287 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
5288 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
5289 d1c2dd75 Iustin Pop
        }
5290 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
5291 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
5292 d1c2dd75 Iustin Pop
5293 d1c2dd75 Iustin Pop
    # instance data
5294 d1c2dd75 Iustin Pop
    instance_data = {}
5295 6286519f Iustin Pop
    for iinfo in i_list:
5296 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
5297 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
5298 d1c2dd75 Iustin Pop
      pir = {
5299 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
5300 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
5301 d1c2dd75 Iustin Pop
        "vcpus": iinfo.vcpus,
5302 d1c2dd75 Iustin Pop
        "memory": iinfo.memory,
5303 d1c2dd75 Iustin Pop
        "os": iinfo.os,
5304 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
5305 d1c2dd75 Iustin Pop
        "nics": nic_data,
5306 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
5307 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
5308 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
5309 d1c2dd75 Iustin Pop
        }
5310 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
5311 d61df03e Iustin Pop
5312 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
5313 d61df03e Iustin Pop
5314 d1c2dd75 Iustin Pop
    self.in_data = data
5315 d61df03e Iustin Pop
5316 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
5317 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
5318 d61df03e Iustin Pop

5319 d1c2dd75 Iustin Pop
    This in combination with _AllocatorGetClusterData will create the
5320 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5321 d61df03e Iustin Pop

5322 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5323 d1c2dd75 Iustin Pop
    done.
5324 d61df03e Iustin Pop

5325 d1c2dd75 Iustin Pop
    """
5326 d1c2dd75 Iustin Pop
    data = self.in_data
5327 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
5328 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
5329 d1c2dd75 Iustin Pop
5330 d1c2dd75 Iustin Pop
    disk_space = _ComputeDiskSize(self.disk_template,
5331 d1c2dd75 Iustin Pop
                                  self.disks[0]["size"], self.disks[1]["size"])
5332 d1c2dd75 Iustin Pop
5333 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
5334 27579978 Iustin Pop
      self.required_nodes = 2
5335 27579978 Iustin Pop
    else:
5336 27579978 Iustin Pop
      self.required_nodes = 1
5337 d1c2dd75 Iustin Pop
    request = {
5338 d1c2dd75 Iustin Pop
      "type": "allocate",
5339 d1c2dd75 Iustin Pop
      "name": self.name,
5340 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
5341 d1c2dd75 Iustin Pop
      "tags": self.tags,
5342 d1c2dd75 Iustin Pop
      "os": self.os,
5343 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
5344 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
5345 d1c2dd75 Iustin Pop
      "disks": self.disks,
5346 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
5347 d1c2dd75 Iustin Pop
      "nics": self.nics,
5348 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5349 d1c2dd75 Iustin Pop
      }
5350 d1c2dd75 Iustin Pop
    data["request"] = request
5351 298fe380 Iustin Pop
5352 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
5353 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
5354 298fe380 Iustin Pop

5355 d1c2dd75 Iustin Pop
    This in combination with _IAllocatorGetClusterData will create the
5356 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5357 d61df03e Iustin Pop

5358 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5359 d1c2dd75 Iustin Pop
    done.
5360 d61df03e Iustin Pop

5361 d1c2dd75 Iustin Pop
    """
5362 72737a7f Iustin Pop
    instance = self.lu.cfg.GetInstanceInfo(self.name)
5363 27579978 Iustin Pop
    if instance is None:
5364 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
5365 27579978 Iustin Pop
                                   " IAllocator" % self.name)
5366 27579978 Iustin Pop
5367 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5368 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
5369 27579978 Iustin Pop
5370 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
5371 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
5372 2a139bb0 Iustin Pop
5373 27579978 Iustin Pop
    self.required_nodes = 1
5374 27579978 Iustin Pop
5375 27579978 Iustin Pop
    disk_space = _ComputeDiskSize(instance.disk_template,
5376 27579978 Iustin Pop
                                  instance.disks[0].size,
5377 27579978 Iustin Pop
                                  instance.disks[1].size)
5378 27579978 Iustin Pop
5379 d1c2dd75 Iustin Pop
    request = {
5380 2a139bb0 Iustin Pop
      "type": "relocate",
5381 d1c2dd75 Iustin Pop
      "name": self.name,
5382 27579978 Iustin Pop
      "disk_space_total": disk_space,
5383 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5384 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
5385 d1c2dd75 Iustin Pop
      }
5386 27579978 Iustin Pop
    self.in_data["request"] = request
5387 d61df03e Iustin Pop
5388 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
5389 d1c2dd75 Iustin Pop
    """Build input data structures.
5390 d61df03e Iustin Pop

5391 d1c2dd75 Iustin Pop
    """
5392 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
5393 d61df03e Iustin Pop
5394 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5395 d1c2dd75 Iustin Pop
      self._AddNewInstance()
5396 d1c2dd75 Iustin Pop
    else:
5397 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
5398 d61df03e Iustin Pop
5399 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
5400 d61df03e Iustin Pop
5401 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
5402 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
5403 298fe380 Iustin Pop

5404 d1c2dd75 Iustin Pop
    """
5405 72737a7f Iustin Pop
    if call_fn is None:
5406 72737a7f Iustin Pop
      call_fn = self.lu.rpc.call_iallocator_runner
5407 d1c2dd75 Iustin Pop
    data = self.in_text
5408 298fe380 Iustin Pop
5409 72737a7f Iustin Pop
    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
5410 298fe380 Iustin Pop
5411 43f5ea7a Guido Trotter
    if not isinstance(result, (list, tuple)) or len(result) != 4:
5412 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
5413 8d528b7c Iustin Pop
5414 8d528b7c Iustin Pop
    rcode, stdout, stderr, fail = result
5415 8d528b7c Iustin Pop
5416 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
5417 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
5418 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
5419 38206f3c Iustin Pop
      raise errors.OpExecError("Instance allocator call failed: %s,"
5420 38206f3c Iustin Pop
                               " output: %s" % (fail, stdout+stderr))
5421 8d528b7c Iustin Pop
    self.out_text = stdout
5422 d1c2dd75 Iustin Pop
    if validate:
5423 d1c2dd75 Iustin Pop
      self._ValidateResult()
5424 298fe380 Iustin Pop
5425 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5426 d1c2dd75 Iustin Pop
    """Process the allocator results.
5427 538475ca Iustin Pop

5428 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5429 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5430 538475ca Iustin Pop

5431 d1c2dd75 Iustin Pop
    """
5432 d1c2dd75 Iustin Pop
    try:
5433 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5434 d1c2dd75 Iustin Pop
    except Exception, err:
5435 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5436 d1c2dd75 Iustin Pop
5437 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5438 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5439 538475ca Iustin Pop
5440 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5441 d1c2dd75 Iustin Pop
      if key not in rdict:
5442 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5443 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5444 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5445 538475ca Iustin Pop
5446 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5447 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5448 d1c2dd75 Iustin Pop
                               " is not a list")
5449 d1c2dd75 Iustin Pop
    self.out_data = rdict
5450 538475ca Iustin Pop
5451 538475ca Iustin Pop
5452 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This validates the opcode parameters according to the test
    direction and mode.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocation mode: all instance-creation parameters must be present
      for attr in ("name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"):
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      expanded_name = self.cfg.ExpandInstanceName(self.op.name)
      if expanded_name is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   expanded_name)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for nic_spec in self.op.nics:
        if not isinstance(nic_spec, dict):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
        if ("mac" not in nic_spec or
            "ip" not in nic_spec or
            "bridge" not in nic_spec):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for disk_spec in self.op.disks:
        if (not isinstance(disk_spec, dict) or
            "size" not in disk_spec or
            not isinstance(disk_spec["size"], int) or
            disk_spec.get("mode") not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      # relocation mode: the named instance must already exist
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      expanded_name = self.cfg.ExpandInstanceName(self.op.name)
      if expanded_name is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = expanded_name
      self.relocate_from = \
        self.cfg.GetInstanceInfo(expanded_name).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      # an external run needs the name of the allocator script
      if getattr(self.op, "allocator", None) is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    Builds the IAllocator input and either returns it as text (IN
    direction) or feeds it to the named allocator script and returns
    the script's output (OUT direction).

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      extra_args = {
        "mem_size": self.op.mem_size,
        "disks": self.op.disks,
        "disk_template": self.op.disk_template,
        "os": self.op.os,
        "tags": self.op.tags,
        "nics": self.op.nics,
        "vcpus": self.op.vcpus,
        }
    else:
      extra_args = {
        "relocate_from": list(self.relocate_from),
        }
    ial = IAllocator(self,
                     mode=self.op.mode,
                     name=self.op.name,
                     **extra_args)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      return ial.in_text
    ial.Run(self.op.allocator, validate=False)
    return ial.out_text