#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_WSSTORE: the LU needs a writable SimpleStore
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True
  REQ_WSSTORE = False
  REQ_BGL = True

  def __init__(self, processor, op, context, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.sstore = sstore
    self.context = context
    self.needed_locks = None
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.sstore)
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use None as a value
        (this reflects what LockSet does, and will be replaced before
        CheckPrereq with the full list of nodes that have been locked)

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: None,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes locks can only be calculated after the ones at lower levels
    have been acquired. This function is called just before acquiring locks
    at a particular level, but after acquiring the ones at lower levels, and
    permits such calculations. It can be used to modify self.needed_locks,
    and by default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this
    prefix is handled by the hooks runner. Also note that additional keys
    will be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes for a phase, an empty list (and not None) should
    be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged, but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.needed_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      wanted_nodes.extend(instance.secondary_nodes)
    self.needed_locks[locking.LEVEL_NODE] = wanted_nodes

    del self.recalculate_locks[locking.LEVEL_NODE]


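# The following is an illustrative sketch only, not part of Ganeti: a
# hypothetical concurrent LU showing the contract described in LogicalUnit
# above (ExpandNames/DeclareLocks with the locking helpers, plus a minimal
# BuildHooksEnv). The opcode field, the HPATH value and the hook node list
# are assumptions made up for this example.
class _LUExampleSketch(LogicalUnit):
  """Example-only LU illustrating the locking and hooks contract.

  """
  HPATH = "instance-example"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False  # this LU declares its own locks instead of taking the BGL

  def ExpandNames(self):
    # canonicalise self.op.instance_name and lock that instance
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance lock is held;
    # only the presence of the recalculate_locks key matters to the helper
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

  def BuildHooksEnv(self):
    # three-element tuple: env dict, pre-hook node list, post-hook node list
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node]
    return env, nl, nl

  def Exec(self, feedback_fn):
    feedback_fn("example LU: nothing to do")

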
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: List of nodes (strings), or an empty list for all

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if nodes:
    wanted = []

    for name in nodes:
      node = lu.cfg.ExpandNodeName(name)
      if node is None:
        raise errors.OpPrereqError("No such node name '%s'" % name)
      wanted.append(node)

  else:
    wanted = lu.cfg.GetNodeList()
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings), or an empty list for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


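# Illustrative sketch only, not part of Ganeti: how a query-style LU would
# typically use the two helpers above from CheckPrereq. The opcode attribute
# names (op.nodes, op.instances) are assumptions for this example.
def _ExampleExpandQueryArgs(lu, op):
  """Example-only helper expanding opcode node/instance lists."""
  wanted_nodes = _GetWantedNodes(lu, op.nodes)              # [] means "all"
  wanted_instances = _GetWantedInstances(lu, op.instances)  # [] means "all"
  return wanted_nodes, wanted_instances

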
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields
    selected: Fields selected by the caller, to be validated

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))


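# Illustrative sketch only, not part of Ganeti: how a query-style LU's
# CheckPrereq would typically validate the fields requested by the user
# against the fields it can serve. The field names and the op.output_fields
# attribute are assumptions for this example.
def _ExampleValidateOutputFields(op):
  """Example-only function showing _CheckOutputFields usage."""
  _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt"],
                     dynamic=["dtotal", "dfree", "mtotal", "mfree"],
                     selected=op.output_fields)

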
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks from single variables.

  Args:
    secondary_nodes: List of secondary nodes as strings
  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env


def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override
  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


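# Worked example (all names and addresses made up for illustration, not from
# a real cluster): for a hypothetical two-NIC instance,
#
#   _BuildInstanceHookEnv("inst1.example.tld", "node1.example.tld",
#                         ["node2.example.tld"], "debian-etch", "up",
#                         128, 1,
#                         [("198.51.100.10", "xen-br0", "aa:00:00:11:22:33"),
#                          (None, "xen-br1", "aa:00:00:44:55:66")])
#
# returns a flat dict along these lines (a None NIC IP becomes ""):
#
#   {"OP_TARGET": "inst1.example.tld",
#    "INSTANCE_NAME": "inst1.example.tld",
#    "INSTANCE_PRIMARY": "node1.example.tld",
#    "INSTANCE_SECONDARIES": "node2.example.tld",
#    "INSTANCE_OS_TYPE": "debian-etch",
#    "INSTANCE_STATUS": "up",
#    "INSTANCE_MEMORY": 128,
#    "INSTANCE_VCPUS": 1,
#    "INSTANCE_NIC0_IP": "198.51.100.10",
#    "INSTANCE_NIC0_BRIDGE": "xen-br0",
#    "INSTANCE_NIC0_HWADDR": "aa:00:00:11:22:33",
#    "INSTANCE_NIC1_IP": "",
#    "INSTANCE_NIC1_BRIDGE": "xen-br1",
#    "INSTANCE_NIC1_HWADDR": "aa:00:00:44:55:66",
#    "INSTANCE_NIC_COUNT": 2}

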
def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if (node_current not in node_instance or
          not instance in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as a secondary
      # has enough memory to host all the instances it would have to take
      # over should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          needed_mem += instance_cfg[instance].memory
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run only in the post phase; their failure is
    logged in the verify output and makes the verification fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result = self._VerifyInstance(instance, inst_config, node_volume,
                                    node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
904 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
905 2c95a8d4 Iustin Pop

906 2c95a8d4 Iustin Pop
  """
907 2c95a8d4 Iustin Pop
  _OP_REQP = []
908 2c95a8d4 Iustin Pop
909 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
910 2c95a8d4 Iustin Pop
    """Check prerequisites.
911 2c95a8d4 Iustin Pop

912 2c95a8d4 Iustin Pop
    This has no prerequisites.
913 2c95a8d4 Iustin Pop

914 2c95a8d4 Iustin Pop
    """
915 2c95a8d4 Iustin Pop
    pass
916 2c95a8d4 Iustin Pop
917 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
918 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
919 2c95a8d4 Iustin Pop

920 2c95a8d4 Iustin Pop
    """
921 b63ed789 Iustin Pop
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
922 2c95a8d4 Iustin Pop
923 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
924 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
925 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
926 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
927 2c95a8d4 Iustin Pop
928 2c95a8d4 Iustin Pop
    nv_dict = {}
929 2c95a8d4 Iustin Pop
    for inst in instances:
930 2c95a8d4 Iustin Pop
      inst_lvs = {}
931 2c95a8d4 Iustin Pop
      if (inst.status != "up" or
932 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
933 2c95a8d4 Iustin Pop
        continue
934 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
935 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
936 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
937 2c95a8d4 Iustin Pop
        for vol in vol_list:
938 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
939 2c95a8d4 Iustin Pop
940 2c95a8d4 Iustin Pop
    if not nv_dict:
941 2c95a8d4 Iustin Pop
      return result
942 2c95a8d4 Iustin Pop
943 2c95a8d4 Iustin Pop
    node_lvs = rpc.call_volume_list(nodes, vg_name)
944 2c95a8d4 Iustin Pop
945 2c95a8d4 Iustin Pop
    to_act = set()
946 2c95a8d4 Iustin Pop
    for node in nodes:
947 2c95a8d4 Iustin Pop
      # node_volume
948 2c95a8d4 Iustin Pop
      lvs = node_lvs[node]
949 2c95a8d4 Iustin Pop
950 b63ed789 Iustin Pop
      if isinstance(lvs, basestring):
951 b63ed789 Iustin Pop
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
952 b63ed789 Iustin Pop
        res_nlvm[node] = lvs
953 b63ed789 Iustin Pop
      elif not isinstance(lvs, dict):
954 2c95a8d4 Iustin Pop
        logger.Info("connection to node %s failed or invalid data returned" %
955 2c95a8d4 Iustin Pop
                    (node,))
956 2c95a8d4 Iustin Pop
        res_nodes.append(node)
957 2c95a8d4 Iustin Pop
        continue
958 2c95a8d4 Iustin Pop
959 2c95a8d4 Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
960 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
961 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
962 b63ed789 Iustin Pop
            and inst.name not in res_instances):
963 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
964 2c95a8d4 Iustin Pop
965 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
966 b63ed789 Iustin Pop
    # data better
967 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
968 b63ed789 Iustin Pop
      if inst.name not in res_missing:
969 b63ed789 Iustin Pop
        res_missing[inst.name] = []
970 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
971 b63ed789 Iustin Pop
972 2c95a8d4 Iustin Pop
    return result
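
# Illustrative sketch (not part of Ganeti): the verification above flattens the
# per-instance LV maps into a single {(node, volume): instance} dictionary and
# then pops entries as volumes are reported back, so whatever is left over is
# missing on disk. The _example_* helpers below are hypothetical stand-ins
# working on plain dictionaries and instance names instead of config objects.
def _example_invert_lv_map(lvs_by_instance):
  """Turn {instance: {node: [vol, ...]}} into {(node, vol): instance}."""
  nv_dict = {}
  for iname, node_map in lvs_by_instance.items():
    for node, vol_list in node_map.items():
      for vol in vol_list:
        nv_dict[(node, vol)] = iname
  return nv_dict

def _example_missing_lvs(nv_dict, reported_vols_by_node):
  """Pop every reported (node, vol) pair; group the leftovers per instance."""
  for node, vols in reported_vols_by_node.items():
    for vol in vols:
      nv_dict.pop((node, vol), None)
  missing = {}
  for (node, vol), iname in nv_dict.items():
    missing.setdefault(iname, []).append((node, vol))
  return missing
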
973 2c95a8d4 Iustin Pop
974 2c95a8d4 Iustin Pop
975 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
976 07bd8a51 Iustin Pop
  """Rename the cluster.
977 07bd8a51 Iustin Pop

978 07bd8a51 Iustin Pop
  """
979 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
980 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
981 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
982 05f86716 Guido Trotter
  REQ_WSSTORE = True
983 07bd8a51 Iustin Pop
984 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
985 07bd8a51 Iustin Pop
    """Build hooks env.
986 07bd8a51 Iustin Pop

987 07bd8a51 Iustin Pop
    """
988 07bd8a51 Iustin Pop
    env = {
989 488b540d Iustin Pop
      "OP_TARGET": self.sstore.GetClusterName(),
990 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
991 07bd8a51 Iustin Pop
      }
992 07bd8a51 Iustin Pop
    mn = self.sstore.GetMasterNode()
993 07bd8a51 Iustin Pop
    return env, [mn], [mn]
994 07bd8a51 Iustin Pop
995 07bd8a51 Iustin Pop
  def CheckPrereq(self):
996 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
997 07bd8a51 Iustin Pop

998 07bd8a51 Iustin Pop
    """
999 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1000 07bd8a51 Iustin Pop
1001 bcf043c9 Iustin Pop
    new_name = hostname.name
1002 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1003 07bd8a51 Iustin Pop
    old_name = self.sstore.GetClusterName()
1004 07bd8a51 Iustin Pop
    old_ip = self.sstore.GetMasterIP()
1005 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1006 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1007 07bd8a51 Iustin Pop
                                 " cluster has changed")
1008 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1009 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1010 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1011 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1012 07bd8a51 Iustin Pop
                                   new_ip)
1013 07bd8a51 Iustin Pop
1014 07bd8a51 Iustin Pop
    self.op.name = new_name
1015 07bd8a51 Iustin Pop
1016 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1017 07bd8a51 Iustin Pop
    """Rename the cluster.
1018 07bd8a51 Iustin Pop

1019 07bd8a51 Iustin Pop
    """
1020 07bd8a51 Iustin Pop
    clustername = self.op.name
1021 07bd8a51 Iustin Pop
    ip = self.ip
1022 07bd8a51 Iustin Pop
    ss = self.sstore
1023 07bd8a51 Iustin Pop
1024 07bd8a51 Iustin Pop
    # shutdown the master IP
1025 07bd8a51 Iustin Pop
    master = ss.GetMasterNode()
1026 1c65840b Iustin Pop
    if not rpc.call_node_stop_master(master, False):
1027 07bd8a51 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
1028 07bd8a51 Iustin Pop
1029 07bd8a51 Iustin Pop
    try:
1030 07bd8a51 Iustin Pop
      # modify the sstore
1031 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_MASTER_IP, ip)
1032 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1033 07bd8a51 Iustin Pop
1034 07bd8a51 Iustin Pop
      # Distribute updated ss config to all nodes
1035 07bd8a51 Iustin Pop
      myself = self.cfg.GetNodeInfo(master)
1036 07bd8a51 Iustin Pop
      dist_nodes = self.cfg.GetNodeList()
1037 07bd8a51 Iustin Pop
      if myself.name in dist_nodes:
1038 07bd8a51 Iustin Pop
        dist_nodes.remove(myself.name)
1039 07bd8a51 Iustin Pop
1040 07bd8a51 Iustin Pop
      logger.Debug("Copying updated ssconf data to all nodes")
1041 07bd8a51 Iustin Pop
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1042 07bd8a51 Iustin Pop
        fname = ss.KeyToFilename(keyname)
1043 07bd8a51 Iustin Pop
        result = rpc.call_upload_file(dist_nodes, fname)
1044 07bd8a51 Iustin Pop
        for to_node in dist_nodes:
1045 07bd8a51 Iustin Pop
          if not result[to_node]:
1046 07bd8a51 Iustin Pop
            logger.Error("copy of file %s to node %s failed" %
1047 07bd8a51 Iustin Pop
                         (fname, to_node))
1048 07bd8a51 Iustin Pop
    finally:
1049 1c65840b Iustin Pop
      if not rpc.call_node_start_master(master, False):
1050 f4bc1f2c Michael Hanselmann
        logger.Error("Could not re-enable the master role on the master,"
1051 f4bc1f2c Michael Hanselmann
                     " please restart manually.")
1052 07bd8a51 Iustin Pop
1053 07bd8a51 Iustin Pop
1054 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1055 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1056 8084f9f6 Manuel Franceschini

1057 8084f9f6 Manuel Franceschini
  Args:
1058 8084f9f6 Manuel Franceschini
    disk: ganeti.objects.Disk object
1059 8084f9f6 Manuel Franceschini

1060 8084f9f6 Manuel Franceschini
  Returns:
1061 8084f9f6 Manuel Franceschini
    boolean indicating whether an LD_LV dev_type was found or not
1062 8084f9f6 Manuel Franceschini

1063 8084f9f6 Manuel Franceschini
  """
1064 8084f9f6 Manuel Franceschini
  if disk.children:
1065 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1066 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1067 8084f9f6 Manuel Franceschini
        return True
1068 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
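
# Illustrative sketch (not part of Ganeti): the helper above walks a disk tree
# depth-first and answers True as soon as any device in the tree is an LVM
# logical volume. _ExampleDisk below is a hypothetical stand-in for
# ganeti.objects.Disk with just the two attributes the check needs.
class _ExampleDisk(object):
  def __init__(self, dev_type, children=None):
    self.dev_type = dev_type
    self.children = children or []

def _example_is_lvm_based(disk, lv_type="lvm"):
  for child in disk.children:
    if _example_is_lvm_based(child, lv_type):
      return True
  return disk.dev_type == lv_type

# e.g. a mirrored device backed by plain logical volumes:
#   _example_is_lvm_based(_ExampleDisk("drbd", [_ExampleDisk("lvm")])) => True
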
1069 8084f9f6 Manuel Franceschini
1070 8084f9f6 Manuel Franceschini
1071 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1072 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1073 8084f9f6 Manuel Franceschini

1074 8084f9f6 Manuel Franceschini
  """
1075 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1076 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1077 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1078 8084f9f6 Manuel Franceschini
1079 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1080 8084f9f6 Manuel Franceschini
    """Build hooks env.
1081 8084f9f6 Manuel Franceschini

1082 8084f9f6 Manuel Franceschini
    """
1083 8084f9f6 Manuel Franceschini
    env = {
1084 8084f9f6 Manuel Franceschini
      "OP_TARGET": self.sstore.GetClusterName(),
1085 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1086 8084f9f6 Manuel Franceschini
      }
1087 8084f9f6 Manuel Franceschini
    mn = self.sstore.GetMasterNode()
1088 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1089 8084f9f6 Manuel Franceschini
1090 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1091 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1092 8084f9f6 Manuel Franceschini

1093 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1094 5f83e263 Iustin Pop
    if the given volume group is valid.
1095 8084f9f6 Manuel Franceschini

1096 8084f9f6 Manuel Franceschini
    """
1097 8084f9f6 Manuel Franceschini
    if not self.op.vg_name:
1098 8084f9f6 Manuel Franceschini
      instances = [self.cfg.GetInstanceInfo(name)
1099 8084f9f6 Manuel Franceschini
                   for name in self.cfg.GetInstanceList()]
1100 8084f9f6 Manuel Franceschini
      for inst in instances:
1101 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1102 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1103 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1104 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1105 8084f9f6 Manuel Franceschini
1106 8084f9f6 Manuel Franceschini
    # if vg_name is not None, check the given volume group on all nodes
1107 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1108 8084f9f6 Manuel Franceschini
      node_list = self.cfg.GetNodeList()
1109 8084f9f6 Manuel Franceschini
      vglist = rpc.call_vg_list(node_list)
1110 8084f9f6 Manuel Franceschini
      for node in node_list:
1111 8d1a2a64 Michael Hanselmann
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
1112 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1113 8084f9f6 Manuel Franceschini
        if vgstatus:
1114 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1115 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1116 8084f9f6 Manuel Franceschini
1117 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1118 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1119 8084f9f6 Manuel Franceschini

1120 8084f9f6 Manuel Franceschini
    """
1121 8084f9f6 Manuel Franceschini
    if self.op.vg_name != self.cfg.GetVGName():
1122 8084f9f6 Manuel Franceschini
      self.cfg.SetVGName(self.op.vg_name)
1123 8084f9f6 Manuel Franceschini
    else:
1124 8084f9f6 Manuel Franceschini
      feedback_fn("Cluster LVM configuration already in desired"
1125 8084f9f6 Manuel Franceschini
                  " state, not changing")
1126 8084f9f6 Manuel Franceschini
1127 8084f9f6 Manuel Franceschini
1128 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
1129 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1130 a8083063 Iustin Pop

1131 a8083063 Iustin Pop
  """
1132 a8083063 Iustin Pop
  if not instance.disks:
1133 a8083063 Iustin Pop
    return True
1134 a8083063 Iustin Pop
1135 a8083063 Iustin Pop
  if not oneshot:
1136 5bfac263 Iustin Pop
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1137 a8083063 Iustin Pop
1138 a8083063 Iustin Pop
  node = instance.primary_node
1139 a8083063 Iustin Pop
1140 a8083063 Iustin Pop
  for dev in instance.disks:
1141 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
1142 a8083063 Iustin Pop
1143 a8083063 Iustin Pop
  retries = 0
1144 a8083063 Iustin Pop
  while True:
1145 a8083063 Iustin Pop
    max_time = 0
1146 a8083063 Iustin Pop
    done = True
1147 a8083063 Iustin Pop
    cumul_degraded = False
1148 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1149 a8083063 Iustin Pop
    if not rstats:
1150 5bfac263 Iustin Pop
      proc.LogWarning("Can't get any data from node %s" % node)
1151 a8083063 Iustin Pop
      retries += 1
1152 a8083063 Iustin Pop
      if retries >= 10:
1153 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1154 3ecf6786 Iustin Pop
                                 " aborting." % node)
1155 a8083063 Iustin Pop
      time.sleep(6)
1156 a8083063 Iustin Pop
      continue
1157 a8083063 Iustin Pop
    retries = 0
1158 a8083063 Iustin Pop
    for i in range(len(rstats)):
1159 a8083063 Iustin Pop
      mstat = rstats[i]
1160 a8083063 Iustin Pop
      if mstat is None:
1161 5bfac263 Iustin Pop
        proc.LogWarning("Can't compute data for node %s/%s" %
1162 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
1163 a8083063 Iustin Pop
        continue
1164 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1165 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1166 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1167 a8083063 Iustin Pop
      if perc_done is not None:
1168 a8083063 Iustin Pop
        done = False
1169 a8083063 Iustin Pop
        if est_time is not None:
1170 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1171 a8083063 Iustin Pop
          max_time = est_time
1172 a8083063 Iustin Pop
        else:
1173 a8083063 Iustin Pop
          rem_time = "no time estimate"
1174 5bfac263 Iustin Pop
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
1175 5bfac263 Iustin Pop
                     (instance.disks[i].iv_name, perc_done, rem_time))
1176 a8083063 Iustin Pop
    if done or oneshot:
1177 a8083063 Iustin Pop
      break
1178 a8083063 Iustin Pop
1179 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1180 a8083063 Iustin Pop
1181 a8083063 Iustin Pop
  if done:
1182 5bfac263 Iustin Pop
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1183 a8083063 Iustin Pop
  return not cumul_degraded
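
# Illustrative sketch (not part of Ganeti): _WaitForSync above is a polling
# loop that tolerates up to ten consecutive failed status calls, resets the
# failure counter on success and never sleeps longer than 60 seconds between
# polls. The helper below is a hypothetical, generic version of that skeleton;
# get_status_fn is assumed to return (done, suggested_sleep) or None on error.
def _example_poll_until_done(get_status_fn, max_failures=10,
                             retry_sleep=6, max_sleep=60):
  import time  # local import keeps this sketch self-contained
  failures = 0
  while True:
    status = get_status_fn()
    if status is None:
      failures += 1
      if failures >= max_failures:
        raise RuntimeError("too many consecutive status failures")
      time.sleep(retry_sleep)
      continue
    failures = 0
    done, suggested_sleep = status
    if done:
      return True
    time.sleep(min(max_sleep, suggested_sleep))
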
1184 a8083063 Iustin Pop
1185 a8083063 Iustin Pop
1186 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1187 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1188 a8083063 Iustin Pop

1189 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1190 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1191 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1192 0834c866 Iustin Pop

1193 a8083063 Iustin Pop
  """
1194 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1195 0834c866 Iustin Pop
  if ldisk:
1196 0834c866 Iustin Pop
    idx = 6
1197 0834c866 Iustin Pop
  else:
1198 0834c866 Iustin Pop
    idx = 5
1199 a8083063 Iustin Pop
1200 a8083063 Iustin Pop
  result = True
1201 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1202 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1203 a8083063 Iustin Pop
    if not rstats:
1204 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1205 a8083063 Iustin Pop
      result = False
1206 a8083063 Iustin Pop
    else:
1207 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1208 a8083063 Iustin Pop
  if dev.children:
1209 a8083063 Iustin Pop
    for child in dev.children:
1210 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1211 a8083063 Iustin Pop
1212 a8083063 Iustin Pop
  return result
1213 a8083063 Iustin Pop
1214 a8083063 Iustin Pop
1215 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1216 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1217 a8083063 Iustin Pop

1218 a8083063 Iustin Pop
  """
1219 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1220 a8083063 Iustin Pop
1221 a8083063 Iustin Pop
  def CheckPrereq(self):
1222 a8083063 Iustin Pop
    """Check prerequisites.
1223 a8083063 Iustin Pop

1224 a8083063 Iustin Pop
    This always succeeds, since this is a pure query LU.
1225 a8083063 Iustin Pop

1226 a8083063 Iustin Pop
    """
1227 1f9430d6 Iustin Pop
    if self.op.names:
1228 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1229 1f9430d6 Iustin Pop
1230 1f9430d6 Iustin Pop
    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
1231 1f9430d6 Iustin Pop
    _CheckOutputFields(static=[],
1232 1f9430d6 Iustin Pop
                       dynamic=self.dynamic_fields,
1233 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1234 1f9430d6 Iustin Pop
1235 1f9430d6 Iustin Pop
  @staticmethod
1236 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1237 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1238 1f9430d6 Iustin Pop

1239 1f9430d6 Iustin Pop
      Args:
1240 1f9430d6 Iustin Pop
        node_list: a list with the names of all nodes
1241 1f9430d6 Iustin Pop
        rlist: a map with node names as keys and OS objects as values
1242 1f9430d6 Iustin Pop

1243 1f9430d6 Iustin Pop
      Returns:
1244 1f9430d6 Iustin Pop
        map: a map with osnames as keys and as value another map, with
1245 1f9430d6 Iustin Pop
             nodes as
1246 1f9430d6 Iustin Pop
             keys and lists of OS objects as values,
1247 1f9430d6 Iustin Pop
             e.g. {"debian-etch": {"node1": [<object>,...],
1248 1f9430d6 Iustin Pop
                                   "node2": [<object>,]}
1249 1f9430d6 Iustin Pop
                  }
1250 1f9430d6 Iustin Pop

1251 1f9430d6 Iustin Pop
    """
1252 1f9430d6 Iustin Pop
    all_os = {}
1253 1f9430d6 Iustin Pop
    for node_name, nr in rlist.iteritems():
1254 1f9430d6 Iustin Pop
      if not nr:
1255 1f9430d6 Iustin Pop
        continue
1256 b4de68a9 Iustin Pop
      for os_obj in nr:
1257 b4de68a9 Iustin Pop
        if os_obj.name not in all_os:
1258 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1259 1f9430d6 Iustin Pop
          # for each node in node_list
1260 b4de68a9 Iustin Pop
          all_os[os_obj.name] = {}
1261 1f9430d6 Iustin Pop
          for nname in node_list:
1262 b4de68a9 Iustin Pop
            all_os[os_obj.name][nname] = []
1263 b4de68a9 Iustin Pop
        all_os[os_obj.name][node_name].append(os_obj)
1264 1f9430d6 Iustin Pop
    return all_os
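
# Illustrative sketch (not part of Ganeti): _DiagnoseByOS above regroups the
# per-node RPC answer into a per-OS view, pre-seeding every known OS with an
# empty list for every node so that nodes missing an OS are still visible.
# The helper below is a hypothetical stand-in using plain strings instead of
# OS objects.
def _example_group_by_os(node_list, os_names_by_node):
  all_os = {}
  for node_name, names in os_names_by_node.items():
    if not names:
      continue
    for os_name in names:
      if os_name not in all_os:
        all_os[os_name] = dict([(nname, []) for nname in node_list])
      all_os[os_name][node_name].append(os_name)
  return all_os

# _example_group_by_os(["node1", "node2"], {"node1": ["debian-etch"]})
# => {"debian-etch": {"node1": ["debian-etch"], "node2": []}}
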
1265 a8083063 Iustin Pop
1266 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1267 a8083063 Iustin Pop
    """Compute the list of OSes.
1268 a8083063 Iustin Pop

1269 a8083063 Iustin Pop
    """
1270 a8083063 Iustin Pop
    node_list = self.cfg.GetNodeList()
1271 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
1272 a8083063 Iustin Pop
    if node_data == False:
1273 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1274 1f9430d6 Iustin Pop
    pol = self._DiagnoseByOS(node_list, node_data)
1275 1f9430d6 Iustin Pop
    output = []
1276 1f9430d6 Iustin Pop
    for os_name, os_data in pol.iteritems():
1277 1f9430d6 Iustin Pop
      row = []
1278 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1279 1f9430d6 Iustin Pop
        if field == "name":
1280 1f9430d6 Iustin Pop
          val = os_name
1281 1f9430d6 Iustin Pop
        elif field == "valid":
1282 1f9430d6 Iustin Pop
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1283 1f9430d6 Iustin Pop
        elif field == "node_status":
1284 1f9430d6 Iustin Pop
          val = {}
1285 1f9430d6 Iustin Pop
          for node_name, nos_list in os_data.iteritems():
1286 1f9430d6 Iustin Pop
            val[node_name] = [(v.status, v.path) for v in nos_list]
1287 1f9430d6 Iustin Pop
        else:
1288 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1289 1f9430d6 Iustin Pop
        row.append(val)
1290 1f9430d6 Iustin Pop
      output.append(row)
1291 1f9430d6 Iustin Pop
1292 1f9430d6 Iustin Pop
    return output
1293 a8083063 Iustin Pop
1294 a8083063 Iustin Pop
1295 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1296 a8083063 Iustin Pop
  """Logical unit for removing a node.
1297 a8083063 Iustin Pop

1298 a8083063 Iustin Pop
  """
1299 a8083063 Iustin Pop
  HPATH = "node-remove"
1300 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1301 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1302 a8083063 Iustin Pop
1303 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1304 a8083063 Iustin Pop
    """Build hooks env.
1305 a8083063 Iustin Pop

1306 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1307 d08869ee Guido Trotter
    node would then be impossible to remove.
1308 a8083063 Iustin Pop

1309 a8083063 Iustin Pop
    """
1310 396e1b78 Michael Hanselmann
    env = {
1311 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1312 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1313 396e1b78 Michael Hanselmann
      }
1314 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1315 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1316 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1317 a8083063 Iustin Pop
1318 a8083063 Iustin Pop
  def CheckPrereq(self):
1319 a8083063 Iustin Pop
    """Check prerequisites.
1320 a8083063 Iustin Pop

1321 a8083063 Iustin Pop
    This checks:
1322 a8083063 Iustin Pop
     - the node exists in the configuration
1323 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1324 a8083063 Iustin Pop
     - it's not the master
1325 a8083063 Iustin Pop

1326 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1327 a8083063 Iustin Pop

1328 a8083063 Iustin Pop
    """
1329 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1330 a8083063 Iustin Pop
    if node is None:
1331 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1332 a8083063 Iustin Pop
1333 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1334 a8083063 Iustin Pop
1335 880478f8 Iustin Pop
    masternode = self.sstore.GetMasterNode()
1336 a8083063 Iustin Pop
    if node.name == masternode:
1337 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1338 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1339 a8083063 Iustin Pop
1340 a8083063 Iustin Pop
    for instance_name in instance_list:
1341 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1342 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1343 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1344 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1345 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1346 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1347 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1348 a8083063 Iustin Pop
    self.op.node_name = node.name
1349 a8083063 Iustin Pop
    self.node = node
1350 a8083063 Iustin Pop
1351 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1352 a8083063 Iustin Pop
    """Removes the node from the cluster.
1353 a8083063 Iustin Pop

1354 a8083063 Iustin Pop
    """
1355 a8083063 Iustin Pop
    node = self.node
1356 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1357 a8083063 Iustin Pop
                node.name)
1358 a8083063 Iustin Pop
1359 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
1360 a8083063 Iustin Pop
1361 d8470559 Michael Hanselmann
    rpc.call_node_leave_cluster(node.name)
1362 c8a0948f Michael Hanselmann
1363 a8083063 Iustin Pop
1364 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1365 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1366 a8083063 Iustin Pop

1367 a8083063 Iustin Pop
  """
1368 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1369 a8083063 Iustin Pop
1370 a8083063 Iustin Pop
  def CheckPrereq(self):
1371 a8083063 Iustin Pop
    """Check prerequisites.
1372 a8083063 Iustin Pop

1373 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
1374 a8083063 Iustin Pop

1375 a8083063 Iustin Pop
    """
1376 e8a4c138 Iustin Pop
    self.dynamic_fields = frozenset([
1377 e8a4c138 Iustin Pop
      "dtotal", "dfree",
1378 e8a4c138 Iustin Pop
      "mtotal", "mnode", "mfree",
1379 e8a4c138 Iustin Pop
      "bootid",
1380 e8a4c138 Iustin Pop
      "ctotal",
1381 e8a4c138 Iustin Pop
      ])
1382 a8083063 Iustin Pop
1383 ec223efb Iustin Pop
    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
1384 ec223efb Iustin Pop
                               "pinst_list", "sinst_list",
1385 130a6a6f Iustin Pop
                               "pip", "sip", "tags"],
1386 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
1387 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1388 a8083063 Iustin Pop
1389 246e180a Iustin Pop
    self.wanted = _GetWantedNodes(self, self.op.names)
1390 a8083063 Iustin Pop
1391 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1392 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1393 a8083063 Iustin Pop

1394 a8083063 Iustin Pop
    """
1395 246e180a Iustin Pop
    nodenames = self.wanted
1396 a8083063 Iustin Pop
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
1397 a8083063 Iustin Pop
1398 a8083063 Iustin Pop
    # begin data gathering
1399 a8083063 Iustin Pop
1400 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
1401 a8083063 Iustin Pop
      live_data = {}
1402 a8083063 Iustin Pop
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
1403 a8083063 Iustin Pop
      for name in nodenames:
1404 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1405 a8083063 Iustin Pop
        if nodeinfo:
1406 a8083063 Iustin Pop
          live_data[name] = {
1407 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1408 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1409 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1410 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1411 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1412 e8a4c138 Iustin Pop
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
1413 3ef10550 Michael Hanselmann
            "bootid": nodeinfo['bootid'],
1414 a8083063 Iustin Pop
            }
1415 a8083063 Iustin Pop
        else:
1416 a8083063 Iustin Pop
          live_data[name] = {}
1417 a8083063 Iustin Pop
    else:
1418 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1419 a8083063 Iustin Pop
1420 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1421 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
1422 a8083063 Iustin Pop
1423 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1424 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1425 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1426 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1427 a8083063 Iustin Pop
1428 ec223efb Iustin Pop
      for instance_name in instancelist:
1429 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1430 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1431 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1432 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1433 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1434 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1435 a8083063 Iustin Pop
1436 a8083063 Iustin Pop
    # end data gathering
1437 a8083063 Iustin Pop
1438 a8083063 Iustin Pop
    output = []
1439 a8083063 Iustin Pop
    for node in nodelist:
1440 a8083063 Iustin Pop
      node_output = []
1441 a8083063 Iustin Pop
      for field in self.op.output_fields:
1442 a8083063 Iustin Pop
        if field == "name":
1443 a8083063 Iustin Pop
          val = node.name
1444 ec223efb Iustin Pop
        elif field == "pinst_list":
1445 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1446 ec223efb Iustin Pop
        elif field == "sinst_list":
1447 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1448 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1449 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1450 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1451 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1452 a8083063 Iustin Pop
        elif field == "pip":
1453 a8083063 Iustin Pop
          val = node.primary_ip
1454 a8083063 Iustin Pop
        elif field == "sip":
1455 a8083063 Iustin Pop
          val = node.secondary_ip
1456 130a6a6f Iustin Pop
        elif field == "tags":
1457 130a6a6f Iustin Pop
          val = list(node.GetTags())
1458 a8083063 Iustin Pop
        elif field in self.dynamic_fields:
1459 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1460 a8083063 Iustin Pop
        else:
1461 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1462 a8083063 Iustin Pop
        node_output.append(val)
1463 a8083063 Iustin Pop
      output.append(node_output)
1464 a8083063 Iustin Pop
1465 a8083063 Iustin Pop
    return output
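
# Illustrative sketch (not part of Ganeti): the pinst/sinst fields above are
# computed by inverting the instance list into two node-keyed maps, one for
# primary and one for secondary roles. The helper below is a hypothetical
# stand-in working on (name, primary_node, secondary_nodes) tuples.
def _example_map_nodes_to_instances(node_names, instances):
  """Return (node_to_primary, node_to_secondary) as dicts of sets."""
  node_to_primary = dict([(name, set()) for name in node_names])
  node_to_secondary = dict([(name, set()) for name in node_names])
  for iname, primary, secondaries in instances:
    if primary in node_to_primary:
      node_to_primary[primary].add(iname)
    for sec in secondaries:
      if sec in node_to_secondary:
        node_to_secondary[sec].add(iname)
  return node_to_primary, node_to_secondary
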
1466 a8083063 Iustin Pop
1467 a8083063 Iustin Pop
1468 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1469 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1470 dcb93971 Michael Hanselmann

1471 dcb93971 Michael Hanselmann
  """
1472 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1473 dcb93971 Michael Hanselmann
1474 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1475 dcb93971 Michael Hanselmann
    """Check prerequisites.
1476 dcb93971 Michael Hanselmann

1477 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1478 dcb93971 Michael Hanselmann

1479 dcb93971 Michael Hanselmann
    """
1480 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1481 dcb93971 Michael Hanselmann
1482 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["node"],
1483 dcb93971 Michael Hanselmann
                       dynamic=["phys", "vg", "name", "size", "instance"],
1484 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1485 dcb93971 Michael Hanselmann
1486 dcb93971 Michael Hanselmann
1487 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1488 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1489 dcb93971 Michael Hanselmann

1490 dcb93971 Michael Hanselmann
    """
1491 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1492 dcb93971 Michael Hanselmann
    volumes = rpc.call_node_volumes(nodenames)
1493 dcb93971 Michael Hanselmann
1494 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1495 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1496 dcb93971 Michael Hanselmann
1497 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1498 dcb93971 Michael Hanselmann
1499 dcb93971 Michael Hanselmann
    output = []
1500 dcb93971 Michael Hanselmann
    for node in nodenames:
1501 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1502 37d19eb2 Michael Hanselmann
        continue
1503 37d19eb2 Michael Hanselmann
1504 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1505 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1506 dcb93971 Michael Hanselmann
1507 dcb93971 Michael Hanselmann
      for vol in node_vols:
1508 dcb93971 Michael Hanselmann
        node_output = []
1509 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1510 dcb93971 Michael Hanselmann
          if field == "node":
1511 dcb93971 Michael Hanselmann
            val = node
1512 dcb93971 Michael Hanselmann
          elif field == "phys":
1513 dcb93971 Michael Hanselmann
            val = vol['dev']
1514 dcb93971 Michael Hanselmann
          elif field == "vg":
1515 dcb93971 Michael Hanselmann
            val = vol['vg']
1516 dcb93971 Michael Hanselmann
          elif field == "name":
1517 dcb93971 Michael Hanselmann
            val = vol['name']
1518 dcb93971 Michael Hanselmann
          elif field == "size":
1519 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1520 dcb93971 Michael Hanselmann
          elif field == "instance":
1521 dcb93971 Michael Hanselmann
            for inst in ilist:
1522 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1523 dcb93971 Michael Hanselmann
                continue
1524 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1525 dcb93971 Michael Hanselmann
                val = inst.name
1526 dcb93971 Michael Hanselmann
                break
1527 dcb93971 Michael Hanselmann
            else:
1528 dcb93971 Michael Hanselmann
              val = '-'
1529 dcb93971 Michael Hanselmann
          else:
1530 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1531 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1532 dcb93971 Michael Hanselmann
1533 dcb93971 Michael Hanselmann
        output.append(node_output)
1534 dcb93971 Michael Hanselmann
1535 dcb93971 Michael Hanselmann
    return output
1536 dcb93971 Michael Hanselmann
1537 dcb93971 Michael Hanselmann
1538 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1539 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1540 a8083063 Iustin Pop

1541 a8083063 Iustin Pop
  """
1542 a8083063 Iustin Pop
  HPATH = "node-add"
1543 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1544 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1545 a8083063 Iustin Pop
1546 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1547 a8083063 Iustin Pop
    """Build hooks env.
1548 a8083063 Iustin Pop

1549 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1550 a8083063 Iustin Pop

1551 a8083063 Iustin Pop
    """
1552 a8083063 Iustin Pop
    env = {
1553 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1554 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1555 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1556 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1557 a8083063 Iustin Pop
      }
1558 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1559 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1560 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1561 a8083063 Iustin Pop
1562 a8083063 Iustin Pop
  def CheckPrereq(self):
1563 a8083063 Iustin Pop
    """Check prerequisites.
1564 a8083063 Iustin Pop

1565 a8083063 Iustin Pop
    This checks:
1566 a8083063 Iustin Pop
     - the new node is not already in the config
1567 a8083063 Iustin Pop
     - it is resolvable
1568 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
1569 a8083063 Iustin Pop

1570 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1571 a8083063 Iustin Pop

1572 a8083063 Iustin Pop
    """
1573 a8083063 Iustin Pop
    node_name = self.op.node_name
1574 a8083063 Iustin Pop
    cfg = self.cfg
1575 a8083063 Iustin Pop
1576 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1577 a8083063 Iustin Pop
1578 bcf043c9 Iustin Pop
    node = dns_data.name
1579 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1580 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1581 a8083063 Iustin Pop
    if secondary_ip is None:
1582 a8083063 Iustin Pop
      secondary_ip = primary_ip
1583 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1584 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1585 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1586 e7c6e02b Michael Hanselmann
1587 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1588 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
1589 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1590 e7c6e02b Michael Hanselmann
                                 node)
1591 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
1592 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1593 a8083063 Iustin Pop
1594 a8083063 Iustin Pop
    for existing_node_name in node_list:
1595 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1596 e7c6e02b Michael Hanselmann
1597 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
1598 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
1599 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
1600 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1601 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
1602 e7c6e02b Michael Hanselmann
        continue
1603 e7c6e02b Michael Hanselmann
1604 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1605 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1606 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1607 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1608 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1609 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1610 a8083063 Iustin Pop
1611 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1612 a8083063 Iustin Pop
    # same as for the master
1613 880478f8 Iustin Pop
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
1614 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1615 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1616 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1617 a8083063 Iustin Pop
      if master_singlehomed:
1618 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1619 3ecf6786 Iustin Pop
                                   " new node has one")
1620 a8083063 Iustin Pop
      else:
1621 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1622 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1623 a8083063 Iustin Pop
1624 a8083063 Iustin Pop
    # checks reachability
1625 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1626 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1627 a8083063 Iustin Pop
1628 a8083063 Iustin Pop
    if not newbie_singlehomed:
1629 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1630 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1631 b15d625f Iustin Pop
                           source=myself.secondary_ip):
1632 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1633 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
1634 a8083063 Iustin Pop
1635 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1636 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1637 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1638 a8083063 Iustin Pop
1639 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1640 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1641 a8083063 Iustin Pop

1642 a8083063 Iustin Pop
    """
1643 a8083063 Iustin Pop
    new_node = self.new_node
1644 a8083063 Iustin Pop
    node = new_node.name
1645 a8083063 Iustin Pop
1646 a8083063 Iustin Pop
    # check connectivity
1647 a8083063 Iustin Pop
    result = rpc.call_version([node])[node]
1648 a8083063 Iustin Pop
    if result:
1649 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1650 a8083063 Iustin Pop
        logger.Info("communication to node %s fine, sw version %s match" %
1651 a8083063 Iustin Pop
                    (node, result))
1652 a8083063 Iustin Pop
      else:
1653 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1654 3ecf6786 Iustin Pop
                                 " node version %s" %
1655 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1656 a8083063 Iustin Pop
    else:
1657 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1658 a8083063 Iustin Pop
1659 a8083063 Iustin Pop
    # setup ssh on node
1660 a8083063 Iustin Pop
    logger.Info("copy ssh key to node %s" % node)
1661 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1662 a8083063 Iustin Pop
    keyarray = []
1663 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1664 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1665 70d9e3d8 Iustin Pop
                priv_key, pub_key]
1666 a8083063 Iustin Pop
1667 a8083063 Iustin Pop
    for i in keyfiles:
1668 a8083063 Iustin Pop
      f = open(i, 'r')
1669 a8083063 Iustin Pop
      try:
1670 a8083063 Iustin Pop
        keyarray.append(f.read())
1671 a8083063 Iustin Pop
      finally:
1672 a8083063 Iustin Pop
        f.close()
1673 a8083063 Iustin Pop
1674 a8083063 Iustin Pop
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1675 a8083063 Iustin Pop
                               keyarray[3], keyarray[4], keyarray[5])
1676 a8083063 Iustin Pop
1677 a8083063 Iustin Pop
    if not result:
1678 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1679 a8083063 Iustin Pop
1680 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1681 d9c02ca6 Michael Hanselmann
    utils.AddHostToEtcHosts(new_node.name)
1682 c8a0948f Michael Hanselmann
1683 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1684 16abfbc2 Alexander Schreiber
      if not rpc.call_node_tcp_ping(new_node.name,
1685 16abfbc2 Alexander Schreiber
                                    constants.LOCALHOST_IP_ADDRESS,
1686 16abfbc2 Alexander Schreiber
                                    new_node.secondary_ip,
1687 16abfbc2 Alexander Schreiber
                                    constants.DEFAULT_NODED_PORT,
1688 16abfbc2 Alexander Schreiber
                                    10, False):
1689 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1690 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
1691 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
1692 a8083063 Iustin Pop
1693 5c0527ed Guido Trotter
    node_verify_list = [self.sstore.GetMasterNode()]
1694 5c0527ed Guido Trotter
    node_verify_param = {
1695 5c0527ed Guido Trotter
      'nodelist': [node],
1696 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
1697 5c0527ed Guido Trotter
    }
1698 5c0527ed Guido Trotter
1699 5c0527ed Guido Trotter
    result = rpc.call_node_verify(node_verify_list, node_verify_param)
1700 5c0527ed Guido Trotter
    for verifier in node_verify_list:
1701 5c0527ed Guido Trotter
      if not result[verifier]:
1702 5c0527ed Guido Trotter
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
1703 5c0527ed Guido Trotter
                                 " for remote verification" % verifier)
1704 5c0527ed Guido Trotter
      if result[verifier]['nodelist']:
1705 5c0527ed Guido Trotter
        for failed in result[verifier]['nodelist']:
1706 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
1707 5c0527ed Guido Trotter
                      (verifier, result[verifier]['nodelist'][failed]))
1708 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
1709 ff98055b Iustin Pop
1710 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1711 a8083063 Iustin Pop
    # including the node just added
1712 880478f8 Iustin Pop
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
1713 102b115b Michael Hanselmann
    dist_nodes = self.cfg.GetNodeList()
1714 102b115b Michael Hanselmann
    if not self.op.readd:
1715 102b115b Michael Hanselmann
      dist_nodes.append(node)
1716 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1717 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1718 a8083063 Iustin Pop
1719 a8083063 Iustin Pop
    logger.Debug("Copying hosts and known_hosts to all nodes")
1720 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1721 a8083063 Iustin Pop
      result = rpc.call_upload_file(dist_nodes, fname)
1722 a8083063 Iustin Pop
      for to_node in dist_nodes:
1723 a8083063 Iustin Pop
        if not result[to_node]:
1724 a8083063 Iustin Pop
          logger.Error("copy of file %s to node %s failed" %
1725 a8083063 Iustin Pop
                       (fname, to_node))
1726 a8083063 Iustin Pop
1727 3d1e7706 Guido Trotter
    to_copy = self.sstore.GetFileList()
1728 2a6469d5 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
1729 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
1730 a8083063 Iustin Pop
    for fname in to_copy:
1731 b5602d15 Guido Trotter
      result = rpc.call_upload_file([node], fname)
1732 b5602d15 Guido Trotter
      if not result[node]:
1733 a8083063 Iustin Pop
        logger.Error("could not copy file %s to node %s" % (fname, node))
1734 a8083063 Iustin Pop
1735 d8470559 Michael Hanselmann
    if self.op.readd:
1736 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
1737 d8470559 Michael Hanselmann
    else:
1738 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
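
# Illustrative sketch (not part of Ganeti): the homing check above requires the
# new node to have the same shape as the master -- either both single-homed
# (secondary IP equal to the primary IP) or both dual-homed. The helper below
# is a hypothetical stand-in returning an error message or None.
def _example_check_homing(master_primary, master_secondary,
                          new_primary, new_secondary):
  master_singlehomed = master_secondary == master_primary
  newbie_singlehomed = new_secondary == new_primary
  if master_singlehomed == newbie_singlehomed:
    return None
  if master_singlehomed:
    return "The master has no private ip but the new node has one"
  return "The master has a private ip but the new node doesn't have one"
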
1739 a8083063 Iustin Pop
1740 a8083063 Iustin Pop
1741 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1742 a8083063 Iustin Pop
  """Query cluster configuration.
1743 a8083063 Iustin Pop

1744 a8083063 Iustin Pop
  """
1745 a8083063 Iustin Pop
  _OP_REQP = []
1746 59322403 Iustin Pop
  REQ_MASTER = False
1747 642339cf Guido Trotter
  REQ_BGL = False
1748 642339cf Guido Trotter
1749 642339cf Guido Trotter
  def ExpandNames(self):
1750 642339cf Guido Trotter
    self.needed_locks = {}
1751 a8083063 Iustin Pop
1752 a8083063 Iustin Pop
  def CheckPrereq(self):
1753 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1754 a8083063 Iustin Pop

1755 a8083063 Iustin Pop
    """
1756 a8083063 Iustin Pop
    pass
1757 a8083063 Iustin Pop
1758 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1759 a8083063 Iustin Pop
    """Return cluster config.
1760 a8083063 Iustin Pop

1761 a8083063 Iustin Pop
    """
1762 a8083063 Iustin Pop
    result = {
1763 5fcdc80d Iustin Pop
      "name": self.sstore.GetClusterName(),
1764 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1765 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1766 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1767 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1768 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1769 880478f8 Iustin Pop
      "master": self.sstore.GetMasterNode(),
1770 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1771 8a12ce45 Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
1772 a8083063 Iustin Pop
      }
1773 a8083063 Iustin Pop
1774 a8083063 Iustin Pop
    return result
1775 a8083063 Iustin Pop
1776 a8083063 Iustin Pop
1777 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
1778 a8083063 Iustin Pop
  """Return a text-representation of the cluster-config.
1779 a8083063 Iustin Pop

1780 a8083063 Iustin Pop
  """
1781 a8083063 Iustin Pop
  _OP_REQP = []
1782 642339cf Guido Trotter
  REQ_BGL = False
1783 642339cf Guido Trotter
1784 642339cf Guido Trotter
  def ExpandNames(self):
1785 642339cf Guido Trotter
    self.needed_locks = {}
1786 a8083063 Iustin Pop
1787 a8083063 Iustin Pop
  def CheckPrereq(self):
1788 a8083063 Iustin Pop
    """No prerequisites.
1789 a8083063 Iustin Pop

1790 a8083063 Iustin Pop
    """
1791 a8083063 Iustin Pop
    pass
1792 a8083063 Iustin Pop
1793 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1794 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
1795 a8083063 Iustin Pop

1796 a8083063 Iustin Pop
    """
1797 a8083063 Iustin Pop
    return self.cfg.DumpConfig()
1798 a8083063 Iustin Pop
1799 a8083063 Iustin Pop
1800 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
1801 a8083063 Iustin Pop
  """Bring up an instance's disks.
1802 a8083063 Iustin Pop

1803 a8083063 Iustin Pop
  """
1804 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1805 a8083063 Iustin Pop
1806 a8083063 Iustin Pop
  def CheckPrereq(self):
1807 a8083063 Iustin Pop
    """Check prerequisites.
1808 a8083063 Iustin Pop

1809 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1810 a8083063 Iustin Pop

1811 a8083063 Iustin Pop
    """
1812 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1813 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1814 a8083063 Iustin Pop
    if instance is None:
1815 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1816 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1817 a8083063 Iustin Pop
    self.instance = instance
1818 a8083063 Iustin Pop
1819 a8083063 Iustin Pop
1820 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1821 a8083063 Iustin Pop
    """Activate the disks.
1822 a8083063 Iustin Pop

1823 a8083063 Iustin Pop
    """
1824 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
1825 a8083063 Iustin Pop
    if not disks_ok:
1826 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
1827 a8083063 Iustin Pop
1828 a8083063 Iustin Pop
    return disks_info
1829 a8083063 Iustin Pop
1830 a8083063 Iustin Pop
1831 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
1832 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
1833 a8083063 Iustin Pop

1834 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
1835 a8083063 Iustin Pop

1836 a8083063 Iustin Pop
  Args:
1837 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
1838 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
1839 a8083063 Iustin Pop
                        in an error return from the function
1840 a8083063 Iustin Pop

1841 a8083063 Iustin Pop
  Returns:
1842 a8083063 Iustin Pop
    a tuple of (disks_ok, device_info); disks_ok is false if the operation
1843 a8083063 Iustin Pop
    failed, and device_info is a list of (host, instance_visible_name,
1844 a8083063 Iustin Pop
    node_visible_name) tuples mapping node devices to instance devices
1845 a8083063 Iustin Pop
  """
1846 a8083063 Iustin Pop
  device_info = []
1847 a8083063 Iustin Pop
  disks_ok = True
1848 fdbd668d Iustin Pop
  iname = instance.name
1849 fdbd668d Iustin Pop
  # With the two passes mechanism we try to reduce the window of
1850 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
1851 fdbd668d Iustin Pop
  # before handshaking occured, but we do not eliminate it
1852 fdbd668d Iustin Pop
1853 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
1854 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
1855 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
1856 fdbd668d Iustin Pop
  # SyncSource, etc.)
1857 fdbd668d Iustin Pop
1858 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
1859 a8083063 Iustin Pop
  for inst_disk in instance.disks:
1860 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1861 a8083063 Iustin Pop
      cfg.SetDiskID(node_disk, node)
1862 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
1863 a8083063 Iustin Pop
      if not result:
1864 f4bc1f2c Michael Hanselmann
        logger.Error("could not prepare block device %s on node %s"
1865 fdbd668d Iustin Pop
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
1866 fdbd668d Iustin Pop
        if not ignore_secondaries:
1867 a8083063 Iustin Pop
          disks_ok = False
1868 fdbd668d Iustin Pop
1869 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
1870 fdbd668d Iustin Pop
1871 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
1872 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
1873 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1874 fdbd668d Iustin Pop
      if node != instance.primary_node:
1875 fdbd668d Iustin Pop
        continue
1876 fdbd668d Iustin Pop
      cfg.SetDiskID(node_disk, node)
1877 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
1878 fdbd668d Iustin Pop
      if not result:
1879 fdbd668d Iustin Pop
        logger.Error("could not prepare block device %s on node %s"
1880 fdbd668d Iustin Pop
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
1881 fdbd668d Iustin Pop
        disks_ok = False
1882 fdbd668d Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
1883 a8083063 Iustin Pop
1884 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
1885 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
1886 b352ab5b Iustin Pop
  # improving the logical/physical id handling
1887 b352ab5b Iustin Pop
  for disk in instance.disks:
1888 b352ab5b Iustin Pop
    cfg.SetDiskID(disk, instance.primary_node)
1889 b352ab5b Iustin Pop
1890 a8083063 Iustin Pop
  return disks_ok, device_info
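
# Illustrative sketch (not part of Ganeti): the two passes above first
# assemble every disk on every node in secondary mode and only then promote
# the primary node's copy, narrowing the window in which DRBD could be made
# primary before its peers are up. assemble_fn below is a hypothetical
# callable taking (node, disk, as_primary) and returning success as a boolean.
def _example_two_pass_assemble(disks_by_node, primary_node, assemble_fn):
  ok = True
  # pass 1: everything in secondary mode
  for node, disks in disks_by_node.items():
    for disk in disks:
      ok = assemble_fn(node, disk, False) and ok
  # pass 2: only the primary node, now in primary mode
  for disk in disks_by_node.get(primary_node, []):
    ok = assemble_fn(primary_node, disk, True) and ok
  return ok
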
1891 a8083063 Iustin Pop
1892 a8083063 Iustin Pop
1893 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
1894 3ecf6786 Iustin Pop
  """Start the disks of an instance.
1895 3ecf6786 Iustin Pop

1896 3ecf6786 Iustin Pop
  """
1897 fe7b0351 Michael Hanselmann
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
1898 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
1899 fe7b0351 Michael Hanselmann
  if not disks_ok:
1900 fe7b0351 Michael Hanselmann
    _ShutdownInstanceDisks(instance, cfg)
1901 fe7b0351 Michael Hanselmann
    if force is not None and not force:
1902 fe7b0351 Michael Hanselmann
      logger.Error("If the message above refers to a secondary node,"
1903 fe7b0351 Michael Hanselmann
                   " you can retry the operation using '--force'.")
1904 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
1905 fe7b0351 Michael Hanselmann
1906 fe7b0351 Michael Hanselmann
1907 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
1908 a8083063 Iustin Pop
  """Shutdown an instance's disks.
1909 a8083063 Iustin Pop

1910 a8083063 Iustin Pop
  """
1911 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1912 a8083063 Iustin Pop
1913 a8083063 Iustin Pop
  def CheckPrereq(self):
1914 a8083063 Iustin Pop
    """Check prerequisites.
1915 a8083063 Iustin Pop

1916 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1917 a8083063 Iustin Pop

1918 a8083063 Iustin Pop
    """
1919 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1920 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1921 a8083063 Iustin Pop
    if instance is None:
1922 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1923 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1924 a8083063 Iustin Pop
    self.instance = instance
1925 a8083063 Iustin Pop
1926 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1927 a8083063 Iustin Pop
    """Deactivate the disks
1928 a8083063 Iustin Pop

1929 a8083063 Iustin Pop
    """
1930 a8083063 Iustin Pop
    instance = self.instance
1931 a8083063 Iustin Pop
    ins_l = rpc.call_instance_list([instance.primary_node])
1932 a8083063 Iustin Pop
    ins_l = ins_l[instance.primary_node]
1933 a8083063 Iustin Pop
    if not isinstance(ins_l, list):
1934 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't contact node '%s'" %
1935 3ecf6786 Iustin Pop
                               instance.primary_node)
1936 a8083063 Iustin Pop
1937 a8083063 Iustin Pop
    if self.instance.name in ins_l:
1938 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance is running, can't shutdown"
1939 3ecf6786 Iustin Pop
                               " block devices.")
1940 a8083063 Iustin Pop
1941 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
1942 a8083063 Iustin Pop
1943 a8083063 Iustin Pop
1944 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
1945 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
1946 a8083063 Iustin Pop

1947 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
1948 a8083063 Iustin Pop

1949 a8083063 Iustin Pop
  If ignore_primary is false, errors on the primary node are not
1950 a8083063 Iustin Pop
  ignored.
1951 a8083063 Iustin Pop

1952 a8083063 Iustin Pop
  """
1953 a8083063 Iustin Pop
  result = True
1954 a8083063 Iustin Pop
  for disk in instance.disks:
1955 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
1956 a8083063 Iustin Pop
      cfg.SetDiskID(top_disk, node)
1957 a8083063 Iustin Pop
      if not rpc.call_blockdev_shutdown(node, top_disk):
1958 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
1959 a8083063 Iustin Pop
                     (disk.iv_name, node))
1960 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
1961 a8083063 Iustin Pop
          result = False
1962 a8083063 Iustin Pop
  return result
1963 a8083063 Iustin Pop
1964 a8083063 Iustin Pop
1965 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
1966 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
1967 d4f16fd9 Iustin Pop

1968 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
1969 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
1970 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
1971 d4f16fd9 Iustin Pop
  exception.
1972 d4f16fd9 Iustin Pop

1973 d4f16fd9 Iustin Pop
  Args:
1974 d4f16fd9 Iustin Pop
    - cfg: a ConfigWriter instance
1975 d4f16fd9 Iustin Pop
    - node: the node name
1976 d4f16fd9 Iustin Pop
    - reason: string to use in the error message
1977 d4f16fd9 Iustin Pop
    - requested: the amount of memory in MiB
1978 d4f16fd9 Iustin Pop

1979 d4f16fd9 Iustin Pop
  """
1980 d4f16fd9 Iustin Pop
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
1981 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
1982 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
1983 d4f16fd9 Iustin Pop
                             " information" % (node,))
1984 d4f16fd9 Iustin Pop
1985 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
1986 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
1987 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
1988 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
1989 d4f16fd9 Iustin Pop
  if requested > free_mem:
1990 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
1991 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
1992 d4f16fd9 Iustin Pop
                               (node, reason, requested, free_mem))
1993 d4f16fd9 Iustin Pop
1994 d4f16fd9 Iustin Pop
1995 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
1996 a8083063 Iustin Pop
  """Starts an instance.
1997 a8083063 Iustin Pop

1998 a8083063 Iustin Pop
  """
1999 a8083063 Iustin Pop
  HPATH = "instance-start"
2000 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2001 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2002 e873317a Guido Trotter
  REQ_BGL = False
2003 e873317a Guido Trotter
2004 e873317a Guido Trotter
  def ExpandNames(self):
2005 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2006 e873317a Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2007 e873317a Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'
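    # the node locks are not known yet, so they are declared empty here and
    # filled in by _LockInstancesNodes() in DeclareLocks below ('replace'
    # overwrites this empty list with the instance's nodes)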
2008 e873317a Guido Trotter
2009 e873317a Guido Trotter
  def DeclareLocks(self, level):
2010 e873317a Guido Trotter
    if level == locking.LEVEL_NODE:
2011 e873317a Guido Trotter
      self._LockInstancesNodes()
2012 a8083063 Iustin Pop
2013 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2014 a8083063 Iustin Pop
    """Build hooks env.
2015 a8083063 Iustin Pop

2016 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2017 a8083063 Iustin Pop

2018 a8083063 Iustin Pop
    """
2019 a8083063 Iustin Pop
    env = {
2020 a8083063 Iustin Pop
      "FORCE": self.op.force,
2021 a8083063 Iustin Pop
      }
2022 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2023 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2024 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2025 a8083063 Iustin Pop
    return env, nl, nl
2026 a8083063 Iustin Pop
2027 a8083063 Iustin Pop
  def CheckPrereq(self):
2028 a8083063 Iustin Pop
    """Check prerequisites.
2029 a8083063 Iustin Pop

2030 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2031 a8083063 Iustin Pop

2032 a8083063 Iustin Pop
    """
2033 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2034 e873317a Guido Trotter
    assert self.instance is not None, \
2035 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2036 a8083063 Iustin Pop
2037 a8083063 Iustin Pop
    # check bridges existence
2038 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2039 a8083063 Iustin Pop
2040 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
2041 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
2042 d4f16fd9 Iustin Pop
                         instance.memory)
2043 d4f16fd9 Iustin Pop
2044 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2045 a8083063 Iustin Pop
    """Start the instance.
2046 a8083063 Iustin Pop

2047 a8083063 Iustin Pop
    """
2048 a8083063 Iustin Pop
    instance = self.instance
2049 a8083063 Iustin Pop
    force = self.op.force
2050 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
2051 a8083063 Iustin Pop
2052 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
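    # note that the configuration is updated before the start is attempted;
    # a failed start therefore leaves the instance marked up but not running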
2053 fe482621 Iustin Pop
2054 a8083063 Iustin Pop
    node_current = instance.primary_node
2055 a8083063 Iustin Pop
2056 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
2057 a8083063 Iustin Pop
2058 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
2059 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2060 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2061 a8083063 Iustin Pop
2062 a8083063 Iustin Pop
2063 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2064 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2065 bf6929a2 Alexander Schreiber

2066 bf6929a2 Alexander Schreiber
  """
2067 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2068 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2069 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2070 e873317a Guido Trotter
  REQ_BGL = False
2071 e873317a Guido Trotter
2072 e873317a Guido Trotter
  def ExpandNames(self):
2073 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2074 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2075 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
2076 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2077 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
2078 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2079 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
2080 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2081 e873317a Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2082 e873317a Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'
2083 e873317a Guido Trotter
2084 e873317a Guido Trotter
  def DeclareLocks(self, level):
2085 e873317a Guido Trotter
    if level == locking.LEVEL_NODE:
2086 0fcc5db3 Guido Trotter
      # FIXME: lock only primary on (not constants.INSTANCE_REBOOT_FULL)
2087 e873317a Guido Trotter
      self._LockInstancesNodes()
2088 bf6929a2 Alexander Schreiber
2089 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2090 bf6929a2 Alexander Schreiber
    """Build hooks env.
2091 bf6929a2 Alexander Schreiber

2092 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2093 bf6929a2 Alexander Schreiber

2094 bf6929a2 Alexander Schreiber
    """
2095 bf6929a2 Alexander Schreiber
    env = {
2096 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2097 bf6929a2 Alexander Schreiber
      }
2098 bf6929a2 Alexander Schreiber
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2099 bf6929a2 Alexander Schreiber
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2100 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2101 bf6929a2 Alexander Schreiber
    return env, nl, nl
2102 bf6929a2 Alexander Schreiber
2103 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2104 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2105 bf6929a2 Alexander Schreiber

2106 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2107 bf6929a2 Alexander Schreiber

2108 bf6929a2 Alexander Schreiber
    """
2109 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2110 e873317a Guido Trotter
    assert self.instance is not None, \
2111 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2112 bf6929a2 Alexander Schreiber
2113 bf6929a2 Alexander Schreiber
    # check bridges existence
2114 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2115 bf6929a2 Alexander Schreiber
2116 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2117 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2118 bf6929a2 Alexander Schreiber

2119 bf6929a2 Alexander Schreiber
    """
2120 bf6929a2 Alexander Schreiber
    instance = self.instance
2121 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2122 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2123 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2124 bf6929a2 Alexander Schreiber
2125 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
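    # soft/hard reboots are delegated to the hypervisor on the primary node;
    # a full reboot is emulated below by an instance shutdown followed by
    # disk (re)activation and a fresh instance start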
2126 bf6929a2 Alexander Schreiber
2127 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2128 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2129 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_reboot(node_current, instance,
2130 bf6929a2 Alexander Schreiber
                                      reboot_type, extra_args):
2131 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2132 bf6929a2 Alexander Schreiber
    else:
2133 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_shutdown(node_current, instance):
2134 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2135 bf6929a2 Alexander Schreiber
      _ShutdownInstanceDisks(instance, self.cfg)
2136 bf6929a2 Alexander Schreiber
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
2137 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_start(node_current, instance, extra_args):
2138 bf6929a2 Alexander Schreiber
        _ShutdownInstanceDisks(instance, self.cfg)
2139 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2140 bf6929a2 Alexander Schreiber
2141 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
2142 bf6929a2 Alexander Schreiber
2143 bf6929a2 Alexander Schreiber
2144 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2145 a8083063 Iustin Pop
  """Shutdown an instance.
2146 a8083063 Iustin Pop

2147 a8083063 Iustin Pop
  """
2148 a8083063 Iustin Pop
  HPATH = "instance-stop"
2149 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2150 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2151 e873317a Guido Trotter
  REQ_BGL = False
2152 e873317a Guido Trotter
2153 e873317a Guido Trotter
  def ExpandNames(self):
2154 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2155 e873317a Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2156 e873317a Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'
2157 e873317a Guido Trotter
2158 e873317a Guido Trotter
  def DeclareLocks(self, level):
2159 e873317a Guido Trotter
    if level == locking.LEVEL_NODE:
2160 e873317a Guido Trotter
      self._LockInstancesNodes()
2161 a8083063 Iustin Pop
2162 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2163 a8083063 Iustin Pop
    """Build hooks env.
2164 a8083063 Iustin Pop

2165 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2166 a8083063 Iustin Pop

2167 a8083063 Iustin Pop
    """
2168 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2169 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2170 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2171 a8083063 Iustin Pop
    return env, nl, nl
2172 a8083063 Iustin Pop
2173 a8083063 Iustin Pop
  def CheckPrereq(self):
2174 a8083063 Iustin Pop
    """Check prerequisites.
2175 a8083063 Iustin Pop

2176 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2177 a8083063 Iustin Pop

2178 a8083063 Iustin Pop
    """
2179 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2180 e873317a Guido Trotter
    assert self.instance is not None, \
2181 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2182 a8083063 Iustin Pop
2183 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2184 a8083063 Iustin Pop
    """Shutdown the instance.
2185 a8083063 Iustin Pop

2186 a8083063 Iustin Pop
    """
2187 a8083063 Iustin Pop
    instance = self.instance
2188 a8083063 Iustin Pop
    node_current = instance.primary_node
2189 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2190 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(node_current, instance):
2191 a8083063 Iustin Pop
      logger.Error("could not shutdown instance")
2192 a8083063 Iustin Pop
2193 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2194 a8083063 Iustin Pop
2195 a8083063 Iustin Pop
2196 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2197 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2198 fe7b0351 Michael Hanselmann

2199 fe7b0351 Michael Hanselmann
  """
2200 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2201 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2202 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2203 4e0b4d2d Guido Trotter
  REQ_BGL = False
2204 4e0b4d2d Guido Trotter
2205 4e0b4d2d Guido Trotter
  def ExpandNames(self):
2206 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
2207 4e0b4d2d Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2208 4e0b4d2d Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'
2209 4e0b4d2d Guido Trotter
2210 4e0b4d2d Guido Trotter
  def DeclareLocks(self, level):
2211 4e0b4d2d Guido Trotter
    if level == locking.LEVEL_NODE:
2212 4e0b4d2d Guido Trotter
      self._LockInstancesNodes()
2213 fe7b0351 Michael Hanselmann
2214 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2215 fe7b0351 Michael Hanselmann
    """Build hooks env.
2216 fe7b0351 Michael Hanselmann

2217 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2218 fe7b0351 Michael Hanselmann

2219 fe7b0351 Michael Hanselmann
    """
2220 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2221 fe7b0351 Michael Hanselmann
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2222 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2223 fe7b0351 Michael Hanselmann
    return env, nl, nl
2224 fe7b0351 Michael Hanselmann
2225 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2226 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2227 fe7b0351 Michael Hanselmann

2228 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2229 fe7b0351 Michael Hanselmann

2230 fe7b0351 Michael Hanselmann
    """
2231 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2232 4e0b4d2d Guido Trotter
    assert instance is not None, \
2233 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2234 4e0b4d2d Guido Trotter
2235 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2236 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2237 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2238 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2239 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2240 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2241 fe7b0351 Michael Hanselmann
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2242 fe7b0351 Michael Hanselmann
    if remote_info:
2243 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2244 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2245 3ecf6786 Iustin Pop
                                  instance.primary_node))
2246 d0834de3 Michael Hanselmann
2247 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2248 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2249 d0834de3 Michael Hanselmann
      # OS verification
2250 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2251 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2252 d0834de3 Michael Hanselmann
      if pnode is None:
2253 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2254 3ecf6786 Iustin Pop
                                   instance.primary_node)
2255 00fe9e38 Guido Trotter
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
2256 dfa96ded Guido Trotter
      if not os_obj:
2257 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2258 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2259 d0834de3 Michael Hanselmann
2260 fe7b0351 Michael Hanselmann
    self.instance = instance
2261 fe7b0351 Michael Hanselmann
2262 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2263 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2264 fe7b0351 Michael Hanselmann

2265 fe7b0351 Michael Hanselmann
    """
2266 fe7b0351 Michael Hanselmann
    inst = self.instance
2267 fe7b0351 Michael Hanselmann
2268 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2269 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2270 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2271 d0834de3 Michael Hanselmann
      self.cfg.AddInstance(inst)
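      # the modified instance object (with the new OS) is written back to
      # the cluster configuration before the OS create scripts run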
2272 d0834de3 Michael Hanselmann
2273 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, inst, None)
2274 fe7b0351 Michael Hanselmann
    try:
2275 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2276 fe7b0351 Michael Hanselmann
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2277 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Could not install OS for instance %s"
2278 f4bc1f2c Michael Hanselmann
                                 " on node %s" %
2279 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2280 fe7b0351 Michael Hanselmann
    finally:
2281 fe7b0351 Michael Hanselmann
      _ShutdownInstanceDisks(inst, self.cfg)
2282 fe7b0351 Michael Hanselmann
2283 fe7b0351 Michael Hanselmann
2284 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2285 decd5f45 Iustin Pop
  """Rename an instance.
2286 decd5f45 Iustin Pop

2287 decd5f45 Iustin Pop
  """
2288 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2289 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2290 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2291 decd5f45 Iustin Pop
2292 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2293 decd5f45 Iustin Pop
    """Build hooks env.
2294 decd5f45 Iustin Pop

2295 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2296 decd5f45 Iustin Pop

2297 decd5f45 Iustin Pop
    """
2298 decd5f45 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self.instance)
2299 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2300 decd5f45 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2301 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2302 decd5f45 Iustin Pop
    return env, nl, nl
2303 decd5f45 Iustin Pop
2304 decd5f45 Iustin Pop
  def CheckPrereq(self):
2305 decd5f45 Iustin Pop
    """Check prerequisites.
2306 decd5f45 Iustin Pop

2307 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2308 decd5f45 Iustin Pop

2309 decd5f45 Iustin Pop
    """
2310 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2311 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2312 decd5f45 Iustin Pop
    if instance is None:
2313 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2314 decd5f45 Iustin Pop
                                 self.op.instance_name)
2315 decd5f45 Iustin Pop
    if instance.status != "down":
2316 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2317 decd5f45 Iustin Pop
                                 self.op.instance_name)
2318 decd5f45 Iustin Pop
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2319 decd5f45 Iustin Pop
    if remote_info:
2320 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2321 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2322 decd5f45 Iustin Pop
                                  instance.primary_node))
2323 decd5f45 Iustin Pop
    self.instance = instance
2324 decd5f45 Iustin Pop
2325 decd5f45 Iustin Pop
    # new name verification
2326 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2327 decd5f45 Iustin Pop
2328 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2329 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
2330 7bde3275 Guido Trotter
    if new_name in instance_list:
2331 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2332 c09f363f Manuel Franceschini
                                 new_name)
2333 7bde3275 Guido Trotter
2334 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2335 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
2336 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2337 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2338 decd5f45 Iustin Pop
2339 decd5f45 Iustin Pop
2340 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2341 decd5f45 Iustin Pop
    """Reinstall the instance.
2342 decd5f45 Iustin Pop

2343 decd5f45 Iustin Pop
    """
2344 decd5f45 Iustin Pop
    inst = self.instance
2345 decd5f45 Iustin Pop
    old_name = inst.name
2346 decd5f45 Iustin Pop
2347 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2348 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2349 b23c4333 Manuel Franceschini
2350 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2351 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
2352 74b5913f Guido Trotter
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
2353 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
2354 decd5f45 Iustin Pop
2355 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2356 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2357 decd5f45 Iustin Pop
2358 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2359 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2360 b23c4333 Manuel Franceschini
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
2361 b23c4333 Manuel Franceschini
                                                old_file_storage_dir,
2362 b23c4333 Manuel Franceschini
                                                new_file_storage_dir)
2363 b23c4333 Manuel Franceschini
2364 b23c4333 Manuel Franceschini
      if not result:
2365 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2366 b23c4333 Manuel Franceschini
                                 " directory '%s' to '%s' (but the instance"
2367 b23c4333 Manuel Franceschini
                                 " has been renamed in Ganeti)" % (
2368 b23c4333 Manuel Franceschini
                                 inst.primary_node, old_file_storage_dir,
2369 b23c4333 Manuel Franceschini
                                 new_file_storage_dir))
2370 b23c4333 Manuel Franceschini
2371 b23c4333 Manuel Franceschini
      if not result[0]:
2372 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2373 b23c4333 Manuel Franceschini
                                 " (but the instance has been renamed in"
2374 b23c4333 Manuel Franceschini
                                 " Ganeti)" % (old_file_storage_dir,
2375 b23c4333 Manuel Franceschini
                                               new_file_storage_dir))
2376 b23c4333 Manuel Franceschini
2377 decd5f45 Iustin Pop
    _StartInstanceDisks(self.cfg, inst, None)
2378 decd5f45 Iustin Pop
    try:
2379 decd5f45 Iustin Pop
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2380 decd5f45 Iustin Pop
                                          "sda", "sdb"):
2381 f4bc1f2c Michael Hanselmann
        msg = ("Could run OS rename script for instance %s on node %s (but the"
2382 f4bc1f2c Michael Hanselmann
               " instance has been renamed in Ganeti)" %
2383 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2384 decd5f45 Iustin Pop
        logger.Error(msg)
2385 decd5f45 Iustin Pop
    finally:
2386 decd5f45 Iustin Pop
      _ShutdownInstanceDisks(inst, self.cfg)
2387 decd5f45 Iustin Pop
2388 decd5f45 Iustin Pop
2389 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2390 a8083063 Iustin Pop
  """Remove an instance.
2391 a8083063 Iustin Pop

2392 a8083063 Iustin Pop
  """
2393 a8083063 Iustin Pop
  HPATH = "instance-remove"
2394 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2395 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
2396 a8083063 Iustin Pop
2397 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2398 a8083063 Iustin Pop
    """Build hooks env.
2399 a8083063 Iustin Pop

2400 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2401 a8083063 Iustin Pop

2402 a8083063 Iustin Pop
    """
2403 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2404 1d67656e Iustin Pop
    nl = [self.sstore.GetMasterNode()]
2405 a8083063 Iustin Pop
    return env, nl, nl
2406 a8083063 Iustin Pop
2407 a8083063 Iustin Pop
  def CheckPrereq(self):
2408 a8083063 Iustin Pop
    """Check prerequisites.
2409 a8083063 Iustin Pop

2410 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2411 a8083063 Iustin Pop

2412 a8083063 Iustin Pop
    """
2413 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2414 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2415 a8083063 Iustin Pop
    if instance is None:
2416 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2417 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2418 a8083063 Iustin Pop
    self.instance = instance
2419 a8083063 Iustin Pop
2420 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2421 a8083063 Iustin Pop
    """Remove the instance.
2422 a8083063 Iustin Pop

2423 a8083063 Iustin Pop
    """
2424 a8083063 Iustin Pop
    instance = self.instance
2425 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
2426 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
2427 a8083063 Iustin Pop
2428 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2429 1d67656e Iustin Pop
      if self.op.ignore_failures:
2430 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2431 1d67656e Iustin Pop
      else:
2432 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2433 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2434 a8083063 Iustin Pop
2435 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
2436 a8083063 Iustin Pop
2437 1d67656e Iustin Pop
    if not _RemoveDisks(instance, self.cfg):
2438 1d67656e Iustin Pop
      if self.op.ignore_failures:
2439 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2440 1d67656e Iustin Pop
      else:
2441 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2442 a8083063 Iustin Pop
2443 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
2444 a8083063 Iustin Pop
2445 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2446 a2fd9afc Guido Trotter
    # Remove the instance from the Ganeti Lock Manager
2447 a2fd9afc Guido Trotter
    self.context.glm.remove(locking.LEVEL_INSTANCE, instance.name)
2448 a8083063 Iustin Pop
2449 a8083063 Iustin Pop
2450 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2451 a8083063 Iustin Pop
  """Logical unit for querying instances.
2452 a8083063 Iustin Pop

2453 a8083063 Iustin Pop
  """
2454 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2455 a8083063 Iustin Pop
2456 a8083063 Iustin Pop
  def CheckPrereq(self):
2457 a8083063 Iustin Pop
    """Check prerequisites.
2458 a8083063 Iustin Pop

2459 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
2460 a8083063 Iustin Pop

2461 a8083063 Iustin Pop
    """
2462 d8052456 Iustin Pop
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
2463 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
2464 dcb93971 Michael Hanselmann
                               "admin_state", "admin_ram",
2465 644eeef9 Iustin Pop
                               "disk_template", "ip", "mac", "bridge",
2466 130a6a6f Iustin Pop
                               "sda_size", "sdb_size", "vcpus", "tags"],
2467 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
2468 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2469 a8083063 Iustin Pop
2470 069dcc86 Iustin Pop
    self.wanted = _GetWantedInstances(self, self.op.names)
2471 069dcc86 Iustin Pop
2472 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2473 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2474 a8083063 Iustin Pop

2475 a8083063 Iustin Pop
    """
2476 069dcc86 Iustin Pop
    instance_names = self.wanted
2477 a8083063 Iustin Pop
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
2478 a8083063 Iustin Pop
                     in instance_names]
2479 a8083063 Iustin Pop
2480 a8083063 Iustin Pop
    # begin data gathering
2481 a8083063 Iustin Pop
2482 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2483 a8083063 Iustin Pop
2484 a8083063 Iustin Pop
    bad_nodes = []
2485 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
2486 a8083063 Iustin Pop
      live_data = {}
2487 a8083063 Iustin Pop
      node_data = rpc.call_all_instances_info(nodes)
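      # each node returns a dict describing its running instances; a result
      # of False means the RPC failed and the node is recorded in bad_nodes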
2488 a8083063 Iustin Pop
      for name in nodes:
2489 a8083063 Iustin Pop
        result = node_data[name]
2490 a8083063 Iustin Pop
        if result:
2491 a8083063 Iustin Pop
          live_data.update(result)
2492 a8083063 Iustin Pop
        elif result == False:
2493 a8083063 Iustin Pop
          bad_nodes.append(name)
2494 a8083063 Iustin Pop
        # else no instance is alive
2495 a8083063 Iustin Pop
    else:
2496 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2497 a8083063 Iustin Pop
2498 a8083063 Iustin Pop
    # end data gathering
2499 a8083063 Iustin Pop
2500 a8083063 Iustin Pop
    output = []
2501 a8083063 Iustin Pop
    for instance in instance_list:
2502 a8083063 Iustin Pop
      iout = []
2503 a8083063 Iustin Pop
      for field in self.op.output_fields:
2504 a8083063 Iustin Pop
        if field == "name":
2505 a8083063 Iustin Pop
          val = instance.name
2506 a8083063 Iustin Pop
        elif field == "os":
2507 a8083063 Iustin Pop
          val = instance.os
2508 a8083063 Iustin Pop
        elif field == "pnode":
2509 a8083063 Iustin Pop
          val = instance.primary_node
2510 a8083063 Iustin Pop
        elif field == "snodes":
2511 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2512 a8083063 Iustin Pop
        elif field == "admin_state":
2513 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2514 a8083063 Iustin Pop
        elif field == "oper_state":
2515 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2516 8a23d2d3 Iustin Pop
            val = None
2517 a8083063 Iustin Pop
          else:
2518 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2519 d8052456 Iustin Pop
        elif field == "status":
2520 d8052456 Iustin Pop
          if instance.primary_node in bad_nodes:
2521 d8052456 Iustin Pop
            val = "ERROR_nodedown"
2522 d8052456 Iustin Pop
          else:
2523 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
2524 d8052456 Iustin Pop
            if running:
2525 d8052456 Iustin Pop
              if instance.status != "down":
2526 d8052456 Iustin Pop
                val = "running"
2527 d8052456 Iustin Pop
              else:
2528 d8052456 Iustin Pop
                val = "ERROR_up"
2529 d8052456 Iustin Pop
            else:
2530 d8052456 Iustin Pop
              if instance.status != "down":
2531 d8052456 Iustin Pop
                val = "ERROR_down"
2532 d8052456 Iustin Pop
              else:
2533 d8052456 Iustin Pop
                val = "ADMIN_down"
2534 a8083063 Iustin Pop
        elif field == "admin_ram":
2535 a8083063 Iustin Pop
          val = instance.memory
2536 a8083063 Iustin Pop
        elif field == "oper_ram":
2537 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2538 8a23d2d3 Iustin Pop
            val = None
2539 a8083063 Iustin Pop
          elif instance.name in live_data:
2540 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2541 a8083063 Iustin Pop
          else:
2542 a8083063 Iustin Pop
            val = "-"
2543 a8083063 Iustin Pop
        elif field == "disk_template":
2544 a8083063 Iustin Pop
          val = instance.disk_template
2545 a8083063 Iustin Pop
        elif field == "ip":
2546 a8083063 Iustin Pop
          val = instance.nics[0].ip
2547 a8083063 Iustin Pop
        elif field == "bridge":
2548 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2549 a8083063 Iustin Pop
        elif field == "mac":
2550 a8083063 Iustin Pop
          val = instance.nics[0].mac
2551 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2552 644eeef9 Iustin Pop
          disk = instance.FindDisk(field[:3])
2553 644eeef9 Iustin Pop
          if disk is None:
2554 8a23d2d3 Iustin Pop
            val = None
2555 644eeef9 Iustin Pop
          else:
2556 644eeef9 Iustin Pop
            val = disk.size
2557 d6d415e8 Iustin Pop
        elif field == "vcpus":
2558 d6d415e8 Iustin Pop
          val = instance.vcpus
2559 130a6a6f Iustin Pop
        elif field == "tags":
2560 130a6a6f Iustin Pop
          val = list(instance.GetTags())
2561 a8083063 Iustin Pop
        else:
2562 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2563 a8083063 Iustin Pop
        iout.append(val)
2564 a8083063 Iustin Pop
      output.append(iout)
2565 a8083063 Iustin Pop
2566 a8083063 Iustin Pop
    return output
2567 a8083063 Iustin Pop
2568 a8083063 Iustin Pop
2569 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2570 a8083063 Iustin Pop
  """Failover an instance.
2571 a8083063 Iustin Pop

2572 a8083063 Iustin Pop
  """
2573 a8083063 Iustin Pop
  HPATH = "instance-failover"
2574 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2575 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2576 c9e5c064 Guido Trotter
  REQ_BGL = False
2577 c9e5c064 Guido Trotter
2578 c9e5c064 Guido Trotter
  def ExpandNames(self):
2579 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
2580 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2581 c9e5c064 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'
2582 c9e5c064 Guido Trotter
2583 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
2584 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
2585 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
2586 a8083063 Iustin Pop
2587 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2588 a8083063 Iustin Pop
    """Build hooks env.
2589 a8083063 Iustin Pop

2590 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2591 a8083063 Iustin Pop

2592 a8083063 Iustin Pop
    """
2593 a8083063 Iustin Pop
    env = {
2594 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2595 a8083063 Iustin Pop
      }
2596 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2597 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2598 a8083063 Iustin Pop
    return env, nl, nl
2599 a8083063 Iustin Pop
2600 a8083063 Iustin Pop
  def CheckPrereq(self):
2601 a8083063 Iustin Pop
    """Check prerequisites.
2602 a8083063 Iustin Pop

2603 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2604 a8083063 Iustin Pop

2605 a8083063 Iustin Pop
    """
2606 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2607 c9e5c064 Guido Trotter
    assert self.instance is not None, \
2608 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2609 a8083063 Iustin Pop
2610 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
2611 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2612 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
2613 2a710df1 Michael Hanselmann
2614 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2615 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2616 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2617 abdf0113 Iustin Pop
                                   "a mirrored disk template")
2618 2a710df1 Michael Hanselmann
2619 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2620 d4f16fd9 Iustin Pop
    # check memory requirements on the secondary node
2621 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
2622 d4f16fd9 Iustin Pop
                         instance.name, instance.memory)
2623 3a7c308e Guido Trotter
2624 a8083063 Iustin Pop
    # check bridge existence
2625 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2626 50ff9a7a Iustin Pop
    if not rpc.call_bridges_exist(target_node, brlist):
2627 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2628 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2629 50ff9a7a Iustin Pop
                                 (brlist, target_node))
2630 a8083063 Iustin Pop
2631 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2632 a8083063 Iustin Pop
    """Failover an instance.
2633 a8083063 Iustin Pop

2634 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2635 a8083063 Iustin Pop
    starting it on the secondary.
2636 a8083063 Iustin Pop

2637 a8083063 Iustin Pop
    """
2638 a8083063 Iustin Pop
    instance = self.instance
2639 a8083063 Iustin Pop
2640 a8083063 Iustin Pop
    source_node = instance.primary_node
2641 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2642 a8083063 Iustin Pop
2643 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2644 a8083063 Iustin Pop
    for dev in instance.disks:
2645 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
2646 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2647 a0aaa0d0 Guido Trotter
        if instance.status == "up" and not self.op.ignore_consistency:
2648 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
2649 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
2650 a8083063 Iustin Pop
2651 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2652 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2653 a8083063 Iustin Pop
                (instance.name, source_node))
2654 a8083063 Iustin Pop
2655 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2656 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
2657 24a40d57 Iustin Pop
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2658 24a40d57 Iustin Pop
                     " anyway. Please make sure node %s is down"  %
2659 24a40d57 Iustin Pop
                     (instance.name, source_node, source_node))
2660 24a40d57 Iustin Pop
      else:
2661 24a40d57 Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2662 24a40d57 Iustin Pop
                                 (instance.name, source_node))
2663 a8083063 Iustin Pop
2664 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2665 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2666 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
2667 a8083063 Iustin Pop
2668 a8083063 Iustin Pop
    instance.primary_node = target_node
2669 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2670 b6102dab Guido Trotter
    self.cfg.Update(instance)
2671 a8083063 Iustin Pop
2672 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
2673 12a0cfbe Guido Trotter
    if instance.status == "up":
2674 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
2675 12a0cfbe Guido Trotter
      logger.Info("Starting instance %s on node %s" %
2676 12a0cfbe Guido Trotter
                  (instance.name, target_node))
2677 12a0cfbe Guido Trotter
2678 12a0cfbe Guido Trotter
      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2679 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
2680 12a0cfbe Guido Trotter
      if not disks_ok:
2681 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2682 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
2683 a8083063 Iustin Pop
2684 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
2685 12a0cfbe Guido Trotter
      if not rpc.call_instance_start(target_node, instance, None):
2686 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2687 12a0cfbe Guido Trotter
        raise errors.OpExecError("Could not start instance %s on node %s." %
2688 12a0cfbe Guido Trotter
                                 (instance.name, target_node))
2689 a8083063 Iustin Pop
2690 a8083063 Iustin Pop
2691 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
2692 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2693 a8083063 Iustin Pop

2694 a8083063 Iustin Pop
  This always creates all devices.
2695 a8083063 Iustin Pop

2696 a8083063 Iustin Pop
  """
2697 a8083063 Iustin Pop
  if device.children:
2698 a8083063 Iustin Pop
    for child in device.children:
2699 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
2700 a8083063 Iustin Pop
        return False
2701 a8083063 Iustin Pop
2702 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2703 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2704 3f78eef2 Iustin Pop
                                    instance.name, True, info)
2705 a8083063 Iustin Pop
  if not new_id:
2706 a8083063 Iustin Pop
    return False
2707 a8083063 Iustin Pop
  if device.physical_id is None:
2708 a8083063 Iustin Pop
    device.physical_id = new_id
2709 a8083063 Iustin Pop
  return True
2710 a8083063 Iustin Pop
2711 a8083063 Iustin Pop
2712 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
2713 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2714 a8083063 Iustin Pop

2715 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2716 a8083063 Iustin Pop
  all its children.
2717 a8083063 Iustin Pop

2718 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2719 a8083063 Iustin Pop

2720 a8083063 Iustin Pop
  """
2721 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2722 a8083063 Iustin Pop
    force = True
2723 a8083063 Iustin Pop
  if device.children:
2724 a8083063 Iustin Pop
    for child in device.children:
2725 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, node, instance,
2726 3f78eef2 Iustin Pop
                                        child, force, info):
2727 a8083063 Iustin Pop
        return False
2728 a8083063 Iustin Pop
2729 a8083063 Iustin Pop
  if not force:
2730 a8083063 Iustin Pop
    return True
2731 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2732 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2733 3f78eef2 Iustin Pop
                                    instance.name, False, info)
2734 a8083063 Iustin Pop
  if not new_id:
2735 a8083063 Iustin Pop
    return False
2736 a8083063 Iustin Pop
  if device.physical_id is None:
2737 a8083063 Iustin Pop
    device.physical_id = new_id
2738 a8083063 Iustin Pop
  return True
2739 a8083063 Iustin Pop
2740 a8083063 Iustin Pop
2741 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2742 923b1523 Iustin Pop
  """Generate a suitable LV name.
2743 923b1523 Iustin Pop

2744 923b1523 Iustin Pop
  This will generate one unique logical volume name for each given extension.
2745 923b1523 Iustin Pop

2746 923b1523 Iustin Pop
  """
2747 923b1523 Iustin Pop
  results = []
2748 923b1523 Iustin Pop
  for val in exts:
2749 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2750 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2751 923b1523 Iustin Pop
  return results
2752 923b1523 Iustin Pop
2753 923b1523 Iustin Pop
2754 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
2755 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
2756 a1f445d3 Iustin Pop

2757 a1f445d3 Iustin Pop
  """
2758 a1f445d3 Iustin Pop
  port = cfg.AllocatePort()
2759 a1f445d3 Iustin Pop
  vgname = cfg.GetVGName()
2760 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
2761 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
2762 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
2763 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
2764 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
2765 a1f445d3 Iustin Pop
                          logical_id = (primary, secondary, port),
2766 a1f445d3 Iustin Pop
                          children = [dev_data, dev_meta],
2767 a1f445d3 Iustin Pop
                          iv_name=iv_name)
2768 a1f445d3 Iustin Pop
  return drbd_dev
2769 a1f445d3 Iustin Pop
2770 7c0d6283 Michael Hanselmann
2771 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
2772 a8083063 Iustin Pop
                          instance_name, primary_node,
2773 0f1a06e3 Manuel Franceschini
                          secondary_nodes, disk_sz, swap_sz,
2774 0f1a06e3 Manuel Franceschini
                          file_storage_dir, file_driver):
2775 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
2776 a8083063 Iustin Pop

2777 a8083063 Iustin Pop
  """
2778 a8083063 Iustin Pop
  #TODO: compute space requirements
2779 a8083063 Iustin Pop
2780 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2781 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
2782 a8083063 Iustin Pop
    disks = []
2783 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
2784 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2785 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2786 923b1523 Iustin Pop
2787 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
2788 fe96220b Iustin Pop
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
2789 923b1523 Iustin Pop
                           logical_id=(vgname, names[0]),
2790 a8083063 Iustin Pop
                           iv_name = "sda")
2791 fe96220b Iustin Pop
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
2792 923b1523 Iustin Pop
                           logical_id=(vgname, names[1]),
2793 a8083063 Iustin Pop
                           iv_name = "sdb")
2794 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
2795 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
2796 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
2797 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2798 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
2799 a1f445d3 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
2800 a1f445d3 Iustin Pop
                                       ".sdb_data", ".sdb_meta"])
2801 a1f445d3 Iustin Pop
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2802 a1f445d3 Iustin Pop
                                         disk_sz, names[0:2], "sda")
2803 a1f445d3 Iustin Pop
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2804 a1f445d3 Iustin Pop
                                         swap_sz, names[2:4], "sdb")
2805 a1f445d3 Iustin Pop
    disks = [drbd_sda_dev, drbd_sdb_dev]
2806 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
2807 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
2808 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
2809 0f1a06e3 Manuel Franceschini
2810 0f1a06e3 Manuel Franceschini
    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
2811 0f1a06e3 Manuel Franceschini
                                iv_name="sda", logical_id=(file_driver,
2812 0f1a06e3 Manuel Franceschini
                                "%s/sda" % file_storage_dir))
2813 0f1a06e3 Manuel Franceschini
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
2814 0f1a06e3 Manuel Franceschini
                                iv_name="sdb", logical_id=(file_driver,
2815 0f1a06e3 Manuel Franceschini
                                "%s/sdb" % file_storage_dir))
2816 0f1a06e3 Manuel Franceschini
    disks = [file_sda_dev, file_sdb_dev]
2817 a8083063 Iustin Pop
  else:
2818 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
2819 a8083063 Iustin Pop
  return disks
2820 a8083063 Iustin Pop
2821 a8083063 Iustin Pop
2822 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2823 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2824 3ecf6786 Iustin Pop

2825 3ecf6786 Iustin Pop
  """
2826 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2827 a0c3fea1 Michael Hanselmann
2828 a0c3fea1 Michael Hanselmann
2829 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
2830 a8083063 Iustin Pop
  """Create all disks for an instance.
2831 a8083063 Iustin Pop

2832 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
2833 a8083063 Iustin Pop

2834 a8083063 Iustin Pop
  Args:
2835 a8083063 Iustin Pop
    cfg: a ConfigWriter instance
    instance: the instance object
2836 a8083063 Iustin Pop

2837 a8083063 Iustin Pop
  Returns:
2838 a8083063 Iustin Pop
    True or False showing the success of the creation process
2839 a8083063 Iustin Pop

2840 a8083063 Iustin Pop
  """
2841 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
2842 a0c3fea1 Michael Hanselmann
2843 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
2844 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
2845 0f1a06e3 Manuel Franceschini
    result = rpc.call_file_storage_dir_create(instance.primary_node,
2846 0f1a06e3 Manuel Franceschini
                                              file_storage_dir)
2847 0f1a06e3 Manuel Franceschini
2848 0f1a06e3 Manuel Franceschini
    if not result:
2849 b62ddbe5 Guido Trotter
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
2850 0f1a06e3 Manuel Franceschini
      return False
2851 0f1a06e3 Manuel Franceschini
2852 0f1a06e3 Manuel Franceschini
    if not result[0]:
2853 0f1a06e3 Manuel Franceschini
      logger.Error("failed to create directory '%s'" % file_storage_dir)
2854 0f1a06e3 Manuel Franceschini
      return False
2855 0f1a06e3 Manuel Franceschini
2856 a8083063 Iustin Pop
  for device in instance.disks:
2857 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
2858 1c6e3627 Manuel Franceschini
                (device.iv_name, instance.name))
2859 a8083063 Iustin Pop
    #HARDCODE
2860 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
2861 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
2862 3f78eef2 Iustin Pop
                                        device, False, info):
2863 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
2864 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
2865 a8083063 Iustin Pop
        return False
2866 a8083063 Iustin Pop
    #HARDCODE
2867 3f78eef2 Iustin Pop
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
2868 3f78eef2 Iustin Pop
                                    instance, device, info):
2869 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
2870 a8083063 Iustin Pop
                   device.iv_name)
2871 a8083063 Iustin Pop
      return False
2872 1c6e3627 Manuel Franceschini
2873 a8083063 Iustin Pop
  return True
2874 a8083063 Iustin Pop
2875 a8083063 Iustin Pop
2876 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
2877 a8083063 Iustin Pop
  """Remove all disks for an instance.
2878 a8083063 Iustin Pop

2879 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
2880 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
2881 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
2882 a8083063 Iustin Pop
  with `_CreateDisks()`).
2883 a8083063 Iustin Pop

2884 a8083063 Iustin Pop
  Args:
2885 a8083063 Iustin Pop
    instance: the instance object
2886 a8083063 Iustin Pop

2887 a8083063 Iustin Pop
  Returns:
2888 a8083063 Iustin Pop
    True or False showing the success of the removal process
2889 a8083063 Iustin Pop

2890 a8083063 Iustin Pop
  """
2891 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
2892 a8083063 Iustin Pop
2893 a8083063 Iustin Pop
  result = True
2894 a8083063 Iustin Pop
  for device in instance.disks:
2895 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
2896 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
2897 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
2898 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
2899 a8083063 Iustin Pop
                     " continuing anyway" %
2900 a8083063 Iustin Pop
                     (device.iv_name, node))
2901 a8083063 Iustin Pop
        result = False
2902 0f1a06e3 Manuel Franceschini
2903 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
2904 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
2905 0f1a06e3 Manuel Franceschini
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
2906 0f1a06e3 Manuel Franceschini
                                            file_storage_dir):
2907 0f1a06e3 Manuel Franceschini
      logger.Error("could not remove directory '%s'" % file_storage_dir)
2908 0f1a06e3 Manuel Franceschini
      result = False
2909 0f1a06e3 Manuel Franceschini
2910 a8083063 Iustin Pop
  return result
2911 a8083063 Iustin Pop
2912 a8083063 Iustin Pop
2913 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
2914 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
2915 e2fe6369 Iustin Pop

2916 e2fe6369 Iustin Pop
  This is currently hard-coded for the two-drive layout.
2917 e2fe6369 Iustin Pop

2918 e2fe6369 Iustin Pop
  """
2919 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk and swap space
2920 e2fe6369 Iustin Pop
  req_size_dict = {
2921 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
2922 e2fe6369 Iustin Pop
    constants.DT_PLAIN: disk_size + swap_size,
2923 e2fe6369 Iustin Pop
    # 256 MB are added for drbd metadata, 128MB for each drbd device
2924 e2fe6369 Iustin Pop
    constants.DT_DRBD8: disk_size + swap_size + 256,
2925 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
2926 e2fe6369 Iustin Pop
  }
2927 e2fe6369 Iustin Pop
2928 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
2929 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
2930 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
2931 e2fe6369 Iustin Pop
2932 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
2933 e2fe6369 Iustin Pop
2934 e2fe6369 Iustin Pop
2935 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
2936 a8083063 Iustin Pop
  """Create an instance.
2937 a8083063 Iustin Pop

2938 a8083063 Iustin Pop
  """
2939 a8083063 Iustin Pop
  HPATH = "instance-add"
2940 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2941 538475ca Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
2942 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
2943 1862d460 Alexander Schreiber
              "wait_for_sync", "ip_check", "mac"]
2944 a8083063 Iustin Pop
2945 538475ca Iustin Pop
  def _RunAllocator(self):
2946 538475ca Iustin Pop
    """Run the allocator based on input opcode.
2947 538475ca Iustin Pop

2948 538475ca Iustin Pop
    """
2949 538475ca Iustin Pop
    disks = [{"size": self.op.disk_size, "mode": "w"},
2950 538475ca Iustin Pop
             {"size": self.op.swap_size, "mode": "w"}]
2951 538475ca Iustin Pop
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
2952 538475ca Iustin Pop
             "bridge": self.op.bridge}]
2953 d1c2dd75 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
2954 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
2955 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
2956 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
2957 d1c2dd75 Iustin Pop
                     tags=[],
2958 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
2959 d1c2dd75 Iustin Pop
                     vcpus=self.op.vcpus,
2960 d1c2dd75 Iustin Pop
                     mem_size=self.op.mem_size,
2961 d1c2dd75 Iustin Pop
                     disks=disks,
2962 d1c2dd75 Iustin Pop
                     nics=nics,
2963 29859cb7 Iustin Pop
                     )
2964 d1c2dd75 Iustin Pop
2965 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
2966 d1c2dd75 Iustin Pop
2967 d1c2dd75 Iustin Pop
    if not ial.success:
2968 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
2969 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
2970 d1c2dd75 Iustin Pop
                                                           ial.info))
2971 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
2972 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
2973 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
2974 27579978 Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
2975 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
2976 538475ca Iustin Pop
    logger.ToStdout("Selected nodes for the instance: %s" %
2977 d1c2dd75 Iustin Pop
                    (", ".join(ial.nodes),))
2978 538475ca Iustin Pop
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
2979 d1c2dd75 Iustin Pop
                (self.op.instance_name, self.op.iallocator, ial.nodes))
2980 27579978 Iustin Pop
    if ial.required_nodes == 2:
2981 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
2982 538475ca Iustin Pop
2983 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2984 a8083063 Iustin Pop
    """Build hooks env.
2985 a8083063 Iustin Pop

2986 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2987 a8083063 Iustin Pop

2988 a8083063 Iustin Pop
    """
2989 a8083063 Iustin Pop
    env = {
2990 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
2991 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
2992 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
2993 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
2994 a8083063 Iustin Pop
      }
2995 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2996 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
2997 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
2998 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
2999 396e1b78 Michael Hanselmann
3000 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3001 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
3002 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
3003 396e1b78 Michael Hanselmann
      status=self.instance_status,
3004 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
3005 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
3006 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
3007 c7b27e9e Iustin Pop
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
3008 396e1b78 Michael Hanselmann
    ))
3009 a8083063 Iustin Pop
3010 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
3011 a8083063 Iustin Pop
          self.secondaries)
3012 a8083063 Iustin Pop
    return env, nl, nl
3013 a8083063 Iustin Pop
3014 a8083063 Iustin Pop
3015 a8083063 Iustin Pop
  def CheckPrereq(self):
3016 a8083063 Iustin Pop
    """Check prerequisites.
3017 a8083063 Iustin Pop

3018 a8083063 Iustin Pop
    """
3019 538475ca Iustin Pop
    # set optional parameters to none if they don't exist
3020 538475ca Iustin Pop
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
3021 31a853d2 Iustin Pop
                 "iallocator", "hvm_acpi", "hvm_pae", "hvm_cdrom_image_path",
3022 31a853d2 Iustin Pop
                 "vnc_bind_address"]:
3023 40ed12dd Guido Trotter
      if not hasattr(self.op, attr):
3024 40ed12dd Guido Trotter
        setattr(self.op, attr, None)
3025 40ed12dd Guido Trotter
3026 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
3027 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
3028 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3029 3ecf6786 Iustin Pop
                                 self.op.mode)
3030 a8083063 Iustin Pop
3031 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3032 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3033 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3034 eedc99de Manuel Franceschini
                                 " instances")
3035 eedc99de Manuel Franceschini
3036 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3037 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
3038 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
3039 a8083063 Iustin Pop
      if src_node is None or src_path is None:
3040 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
3041 3ecf6786 Iustin Pop
                                   " node and path options")
3042 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
3043 a8083063 Iustin Pop
      if src_node_full is None:
3044 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
3045 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
3046 a8083063 Iustin Pop
3047 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
3048 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
3049 a8083063 Iustin Pop
3050 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
3051 a8083063 Iustin Pop
3052 a8083063 Iustin Pop
      if not export_info:
3053 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3054 a8083063 Iustin Pop
3055 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3056 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3057 a8083063 Iustin Pop
3058 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3059 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3060 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3061 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3062 a8083063 Iustin Pop
3063 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
3064 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
3065 3ecf6786 Iustin Pop
                                   " one data disk")
3066 a8083063 Iustin Pop
3067 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
3068 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3069 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
3070 a8083063 Iustin Pop
                                                         'disk0_dump'))
3071 a8083063 Iustin Pop
      self.src_image = diskimage
3072 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
3073 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
3074 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
3075 a8083063 Iustin Pop
3076 901a65c1 Iustin Pop
    #### instance parameters check
3077 901a65c1 Iustin Pop
3078 a8083063 Iustin Pop
    # disk template and mirror node verification
3079 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3080 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
3081 a8083063 Iustin Pop
3082 901a65c1 Iustin Pop
    # instance name verification
3083 901a65c1 Iustin Pop
    hostname1 = utils.HostInfo(self.op.instance_name)
3084 901a65c1 Iustin Pop
3085 901a65c1 Iustin Pop
    self.op.instance_name = instance_name = hostname1.name
3086 901a65c1 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
3087 901a65c1 Iustin Pop
    if instance_name in instance_list:
3088 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3089 901a65c1 Iustin Pop
                                 instance_name)
3090 901a65c1 Iustin Pop
3091 901a65c1 Iustin Pop
    # ip validity checks
3092 901a65c1 Iustin Pop
    ip = getattr(self.op, "ip", None)
3093 901a65c1 Iustin Pop
    if ip is None or ip.lower() == "none":
3094 901a65c1 Iustin Pop
      inst_ip = None
3095 901a65c1 Iustin Pop
    elif ip.lower() == "auto":
3096 901a65c1 Iustin Pop
      inst_ip = hostname1.ip
3097 901a65c1 Iustin Pop
    else:
3098 901a65c1 Iustin Pop
      if not utils.IsValidIP(ip):
3099 901a65c1 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3100 901a65c1 Iustin Pop
                                   " like a valid IP" % ip)
3101 901a65c1 Iustin Pop
      inst_ip = ip
3102 901a65c1 Iustin Pop
    self.inst_ip = self.op.ip = inst_ip
3103 901a65c1 Iustin Pop
3104 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3105 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3106 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3107 901a65c1 Iustin Pop
3108 901a65c1 Iustin Pop
    if self.op.ip_check:
3109 901a65c1 Iustin Pop
      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
3110 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3111 901a65c1 Iustin Pop
                                   (hostname1.ip, instance_name))
3112 901a65c1 Iustin Pop
3113 901a65c1 Iustin Pop
    # MAC address verification
3114 901a65c1 Iustin Pop
    if self.op.mac != "auto":
3115 901a65c1 Iustin Pop
      if not utils.IsValidMac(self.op.mac.lower()):
3116 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3117 901a65c1 Iustin Pop
                                   self.op.mac)
3118 901a65c1 Iustin Pop
3119 901a65c1 Iustin Pop
    # bridge verification
3120 901a65c1 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
3121 901a65c1 Iustin Pop
    if bridge is None:
3122 901a65c1 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
3123 901a65c1 Iustin Pop
    else:
3124 901a65c1 Iustin Pop
      self.op.bridge = bridge
3125 901a65c1 Iustin Pop
3126 901a65c1 Iustin Pop
    # boot order verification
3127 901a65c1 Iustin Pop
    if self.op.hvm_boot_order is not None:
3128 901a65c1 Iustin Pop
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3129 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid boot order specified,"
3130 901a65c1 Iustin Pop
                                   " must be one or more of [acdn]")
3131 901a65c1 Iustin Pop
    # file storage checks
3132 0f1a06e3 Manuel Franceschini
    if (self.op.file_driver and
3133 0f1a06e3 Manuel Franceschini
        not self.op.file_driver in constants.FILE_DRIVER):
3134 0f1a06e3 Manuel Franceschini
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3135 0f1a06e3 Manuel Franceschini
                                 self.op.file_driver)
3136 0f1a06e3 Manuel Franceschini
3137 0f1a06e3 Manuel Franceschini
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3138 b4de68a9 Iustin Pop
      raise errors.OpPrereqError("File storage directory not a relative"
3139 b4de68a9 Iustin Pop
                                 " path")
3140 538475ca Iustin Pop
    #### allocator run
3141 538475ca Iustin Pop
3142 538475ca Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3143 538475ca Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3144 538475ca Iustin Pop
                                 " node must be given")
3145 538475ca Iustin Pop
3146 538475ca Iustin Pop
    if self.op.iallocator is not None:
3147 538475ca Iustin Pop
      self._RunAllocator()
3148 0f1a06e3 Manuel Franceschini
3149 901a65c1 Iustin Pop
    #### node related checks
3150 901a65c1 Iustin Pop
3151 901a65c1 Iustin Pop
    # check primary node
3152 901a65c1 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
3153 901a65c1 Iustin Pop
    if pnode is None:
3154 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
3155 901a65c1 Iustin Pop
                                 self.op.pnode)
3156 901a65c1 Iustin Pop
    self.op.pnode = pnode.name
3157 901a65c1 Iustin Pop
    self.pnode = pnode
3158 901a65c1 Iustin Pop
    self.secondaries = []
3159 901a65c1 Iustin Pop
3160 901a65c1 Iustin Pop
    # mirror node verification
3161 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3162 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
3163 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3164 3ecf6786 Iustin Pop
                                   " a mirror node")
3165 a8083063 Iustin Pop
3166 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
3167 a8083063 Iustin Pop
      if snode_name is None:
3168 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
3169 3ecf6786 Iustin Pop
                                   self.op.snode)
3170 a8083063 Iustin Pop
      elif snode_name == pnode.name:
3171 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3172 3ecf6786 Iustin Pop
                                   " the primary node.")
3173 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
3174 a8083063 Iustin Pop
3175 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3176 e2fe6369 Iustin Pop
                                self.op.disk_size, self.op.swap_size)
3177 ed1ebc60 Guido Trotter
3178 8d75db10 Iustin Pop
    # Check lv size requirements
3179 8d75db10 Iustin Pop
    if req_size is not None:
3180 8d75db10 Iustin Pop
      nodenames = [pnode.name] + self.secondaries
3181 8d75db10 Iustin Pop
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3182 8d75db10 Iustin Pop
      for node in nodenames:
3183 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3184 8d75db10 Iustin Pop
        if not info:
3185 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3186 3e91897b Iustin Pop
                                     " from node '%s'" % node)
3187 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3188 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3189 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3190 8d75db10 Iustin Pop
                                     " node %s" % node)
3191 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
3192 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3193 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3194 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3195 ed1ebc60 Guido Trotter
3196 a8083063 Iustin Pop
    # os verification
3197 00fe9e38 Guido Trotter
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3198 dfa96ded Guido Trotter
    if not os_obj:
3199 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3200 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3201 a8083063 Iustin Pop
3202 3b6d8c9b Iustin Pop
    if self.op.kernel_path == constants.VALUE_NONE:
3203 3b6d8c9b Iustin Pop
      raise errors.OpPrereqError("Can't set instance kernel to none")
3204 3b6d8c9b Iustin Pop
3205 a8083063 Iustin Pop
3206 901a65c1 Iustin Pop
    # bridge check on primary node
3207 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3208 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3209 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3210 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
3211 a8083063 Iustin Pop
3212 49ce1563 Iustin Pop
    # memory check on primary node
3213 49ce1563 Iustin Pop
    if self.op.start:
3214 49ce1563 Iustin Pop
      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
3215 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3216 49ce1563 Iustin Pop
                           self.op.mem_size)
3217 49ce1563 Iustin Pop
3218 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
3219 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
3220 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
3221 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
3222 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
3223 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3224 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
3225 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
3226 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
3227 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
3228 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3229 31a853d2 Iustin Pop
3230 31a853d2 Iustin Pop
    # vnc_bind_address verification
3231 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
3232 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
3233 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
3234 31a853d2 Iustin Pop
                                   " like a valid IP address" %
3235 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
3236 31a853d2 Iustin Pop
3237 a8083063 Iustin Pop
    if self.op.start:
3238 a8083063 Iustin Pop
      self.instance_status = 'up'
3239 a8083063 Iustin Pop
    else:
3240 a8083063 Iustin Pop
      self.instance_status = 'down'
3241 a8083063 Iustin Pop
3242 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3243 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
3244 a8083063 Iustin Pop

3245 a8083063 Iustin Pop
    """
3246 a8083063 Iustin Pop
    instance = self.op.instance_name
3247 a8083063 Iustin Pop
    pnode_name = self.pnode.name
3248 a8083063 Iustin Pop
3249 1862d460 Alexander Schreiber
    if self.op.mac == "auto":
3250 ba4b62cf Iustin Pop
      mac_address = self.cfg.GenerateMAC()
3251 1862d460 Alexander Schreiber
    else:
3252 ba4b62cf Iustin Pop
      mac_address = self.op.mac
3253 1862d460 Alexander Schreiber
3254 1862d460 Alexander Schreiber
    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
3255 a8083063 Iustin Pop
    if self.inst_ip is not None:
3256 a8083063 Iustin Pop
      nic.ip = self.inst_ip
3257 a8083063 Iustin Pop
3258 2a6469d5 Alexander Schreiber
    ht_kind = self.sstore.GetHypervisorType()
3259 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
3260 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
3261 2a6469d5 Alexander Schreiber
    else:
3262 2a6469d5 Alexander Schreiber
      network_port = None
3263 58acb49d Alexander Schreiber
3264 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is None:
3265 31a853d2 Iustin Pop
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3266 31a853d2 Iustin Pop
3267 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
3268 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
3269 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
3270 2c313123 Manuel Franceschini
    else:
3271 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
3272 2c313123 Manuel Franceschini
3273 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
3274 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
3275 0f1a06e3 Manuel Franceschini
                                        self.sstore.GetFileStorageDir(),
3276 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
3277 0f1a06e3 Manuel Franceschini
3278 0f1a06e3 Manuel Franceschini
3279 923b1523 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg,
3280 a8083063 Iustin Pop
                                  self.op.disk_template,
3281 a8083063 Iustin Pop
                                  instance, pnode_name,
3282 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
3283 0f1a06e3 Manuel Franceschini
                                  self.op.swap_size,
3284 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
3285 0f1a06e3 Manuel Franceschini
                                  self.op.file_driver)
3286 a8083063 Iustin Pop
3287 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3288 a8083063 Iustin Pop
                            primary_node=pnode_name,
3289 a8083063 Iustin Pop
                            memory=self.op.mem_size,
3290 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
3291 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
3292 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
3293 a8083063 Iustin Pop
                            status=self.instance_status,
3294 58acb49d Alexander Schreiber
                            network_port=network_port,
3295 3b6d8c9b Iustin Pop
                            kernel_path=self.op.kernel_path,
3296 3b6d8c9b Iustin Pop
                            initrd_path=self.op.initrd_path,
3297 25c5878d Alexander Schreiber
                            hvm_boot_order=self.op.hvm_boot_order,
3298 31a853d2 Iustin Pop
                            hvm_acpi=self.op.hvm_acpi,
3299 31a853d2 Iustin Pop
                            hvm_pae=self.op.hvm_pae,
3300 31a853d2 Iustin Pop
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
3301 31a853d2 Iustin Pop
                            vnc_bind_address=self.op.vnc_bind_address,
3302 a8083063 Iustin Pop
                            )
3303 a8083063 Iustin Pop
3304 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
3305 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
3306 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3307 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
3308 a8083063 Iustin Pop
3309 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
3310 a8083063 Iustin Pop
3311 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3312 a2fd9afc Guido Trotter
    # Add the new instance to the Ganeti Lock Manager
3313 a2fd9afc Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, instance)
3314 a8083063 Iustin Pop
3315 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3316 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
3317 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3318 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3319 a8083063 Iustin Pop
      time.sleep(15)
3320 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3321 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
3322 a8083063 Iustin Pop
    else:
3323 a8083063 Iustin Pop
      disk_abort = False
3324 a8083063 Iustin Pop
3325 a8083063 Iustin Pop
    if disk_abort:
3326 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3327 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3328 a2fd9afc Guido Trotter
      # Remove the new instance from the Ganeti Lock Manager
3329 a2fd9afc Guido Trotter
      self.context.glm.remove(locking.LEVEL_INSTANCE, iobj.name)
3330 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3331 3ecf6786 Iustin Pop
                               " this instance")
3332 a8083063 Iustin Pop
3333 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3334 a8083063 Iustin Pop
                (instance, pnode_name))
3335 a8083063 Iustin Pop
3336 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3337 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3338 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3339 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
3340 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3341 3ecf6786 Iustin Pop
                                   " on node %s" %
3342 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3343 a8083063 Iustin Pop
3344 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3345 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3346 a8083063 Iustin Pop
        src_node = self.op.src_node
3347 a8083063 Iustin Pop
        src_image = self.src_image
3348 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
3349 a8083063 Iustin Pop
                                                src_node, src_image):
3350 3ecf6786 Iustin Pop
          raise errors.OpExecError("Could not import os for instance"
3351 3ecf6786 Iustin Pop
                                   " %s on node %s" %
3352 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3353 a8083063 Iustin Pop
      else:
3354 a8083063 Iustin Pop
        # also checked in the prereq part
3355 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3356 3ecf6786 Iustin Pop
                                     % self.op.mode)
3357 a8083063 Iustin Pop
3358 a8083063 Iustin Pop
    if self.op.start:
3359 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
3360 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3361 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
3362 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3363 a8083063 Iustin Pop
3364 a8083063 Iustin Pop
3365 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3366 a8083063 Iustin Pop
  """Connect to an instance's console.
3367 a8083063 Iustin Pop

3368 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3369 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3370 a8083063 Iustin Pop
  console.
3371 a8083063 Iustin Pop

3372 a8083063 Iustin Pop
  """
3373 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3374 8659b73e Guido Trotter
  REQ_BGL = False
3375 8659b73e Guido Trotter
3376 8659b73e Guido Trotter
  def ExpandNames(self):
3377 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
3378 a8083063 Iustin Pop
3379 a8083063 Iustin Pop
  def CheckPrereq(self):
3380 a8083063 Iustin Pop
    """Check prerequisites.
3381 a8083063 Iustin Pop

3382 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3383 a8083063 Iustin Pop

3384 a8083063 Iustin Pop
    """
3385 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3386 8659b73e Guido Trotter
    assert self.instance is not None, \
3387 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3388 a8083063 Iustin Pop
3389 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3390 a8083063 Iustin Pop
    """Connect to the console of an instance
3391 a8083063 Iustin Pop

3392 a8083063 Iustin Pop
    """
3393 a8083063 Iustin Pop
    instance = self.instance
3394 a8083063 Iustin Pop
    node = instance.primary_node
3395 a8083063 Iustin Pop
3396 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
3397 a8083063 Iustin Pop
    if node_insts is False:
3398 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3399 a8083063 Iustin Pop
3400 a8083063 Iustin Pop
    if instance.name not in node_insts:
3401 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3402 a8083063 Iustin Pop
3403 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3404 a8083063 Iustin Pop
3405 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
3406 30989e69 Alexander Schreiber
    console_cmd = hyper.GetShellCommandForConsole(instance)
3407 b047857b Michael Hanselmann
3408 82122173 Iustin Pop
    # build ssh cmdline
3409 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
3410 a8083063 Iustin Pop
3411 a8083063 Iustin Pop
3412 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3413 a8083063 Iustin Pop
  """Replace the disks of an instance.
3414 a8083063 Iustin Pop

3415 a8083063 Iustin Pop
  """
3416 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3417 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3418 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3419 a8083063 Iustin Pop
3420 b6e82a65 Iustin Pop
  def _RunAllocator(self):
3421 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
3422 b6e82a65 Iustin Pop

3423 b6e82a65 Iustin Pop
    """
3424 b6e82a65 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3425 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
3426 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
3427 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
3428 b6e82a65 Iustin Pop
3429 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
3430 b6e82a65 Iustin Pop
3431 b6e82a65 Iustin Pop
    if not ial.success:
3432 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3433 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3434 b6e82a65 Iustin Pop
                                                           ial.info))
3435 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3436 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3437 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
3438 b6e82a65 Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
3439 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
3440 b6e82a65 Iustin Pop
    logger.ToStdout("Selected new secondary for the instance: %s" %
3441 b6e82a65 Iustin Pop
                    self.op.remote_node)
3442 b6e82a65 Iustin Pop
3443 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3444 a8083063 Iustin Pop
    """Build hooks env.
3445 a8083063 Iustin Pop

3446 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3447 a8083063 Iustin Pop

3448 a8083063 Iustin Pop
    """
3449 a8083063 Iustin Pop
    env = {
3450 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
3451 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3452 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3453 a8083063 Iustin Pop
      }
3454 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3455 0834c866 Iustin Pop
    nl = [
3456 0834c866 Iustin Pop
      self.sstore.GetMasterNode(),
3457 0834c866 Iustin Pop
      self.instance.primary_node,
3458 0834c866 Iustin Pop
      ]
3459 0834c866 Iustin Pop
    if self.op.remote_node is not None:
3460 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
3461 a8083063 Iustin Pop
    return env, nl, nl
3462 a8083063 Iustin Pop
3463 a8083063 Iustin Pop
  def CheckPrereq(self):
3464 a8083063 Iustin Pop
    """Check prerequisites.
3465 a8083063 Iustin Pop

3466 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3467 a8083063 Iustin Pop

3468 a8083063 Iustin Pop
    """
3469 b6e82a65 Iustin Pop
    if not hasattr(self.op, "remote_node"):
3470 b6e82a65 Iustin Pop
      self.op.remote_node = None
3471 b6e82a65 Iustin Pop
3472 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3473 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3474 a8083063 Iustin Pop
    if instance is None:
3475 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3476 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3477 a8083063 Iustin Pop
    self.instance = instance
3478 7df43a76 Iustin Pop
    self.op.instance_name = instance.name
3479 a8083063 Iustin Pop
3480 a9e0c397 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3481 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3482 a9e0c397 Iustin Pop
                                 " network mirrored.")
3483 a8083063 Iustin Pop
3484 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3485 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3486 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3487 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3488 a8083063 Iustin Pop
3489 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
3490 a9e0c397 Iustin Pop
3491 b6e82a65 Iustin Pop
    ia_name = getattr(self.op, "iallocator", None)
3492 b6e82a65 Iustin Pop
    if ia_name is not None:
3493 b6e82a65 Iustin Pop
      if self.op.remote_node is not None:
3494 b6e82a65 Iustin Pop
        raise errors.OpPrereqError("Give either the iallocator or the new"
3495 b6e82a65 Iustin Pop
                                   " secondary, not both")
3496 b6e82a65 Iustin Pop
      self.op.remote_node = self._RunAllocator()
3497 b6e82a65 Iustin Pop
3498 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
3499 a9e0c397 Iustin Pop
    if remote_node is not None:
3500 a8083063 Iustin Pop
      remote_node = self.cfg.ExpandNodeName(remote_node)
3501 a8083063 Iustin Pop
      if remote_node is None:
3502 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Node '%s' not known" %
3503 3ecf6786 Iustin Pop
                                   self.op.remote_node)
3504 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3505 a9e0c397 Iustin Pop
    else:
3506 a9e0c397 Iustin Pop
      self.remote_node_info = None
3507 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3508 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3509 3ecf6786 Iustin Pop
                                 " the instance.")
3510 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
3511 0834c866 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_SEC:
3512 0834c866 Iustin Pop
        # this is for DRBD8, where we can't execute the same mode of
3513 0834c866 Iustin Pop
        # replacement as for drbd7 (no different port allocated)
3514 0834c866 Iustin Pop
        raise errors.OpPrereqError("Same secondary given, cannot execute"
3515 0834c866 Iustin Pop
                                   " replacement")
3516 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3517 7df43a76 Iustin Pop
      if (self.op.mode == constants.REPLACE_DISK_ALL and
3518 7df43a76 Iustin Pop
          remote_node is not None):
3519 7df43a76 Iustin Pop
        # switch to replace secondary mode
3520 7df43a76 Iustin Pop
        self.op.mode = constants.REPLACE_DISK_SEC
3521 7df43a76 Iustin Pop
3522 a9e0c397 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_ALL:
3523 12c3449a Michael Hanselmann
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
3524 a9e0c397 Iustin Pop
                                   " secondary disk replacement, not"
3525 a9e0c397 Iustin Pop
                                   " both at once")
3526 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_PRI:
3527 a9e0c397 Iustin Pop
        if remote_node is not None:
3528 12c3449a Michael Hanselmann
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
3529 a9e0c397 Iustin Pop
                                     " the secondary while doing a primary"
3530 a9e0c397 Iustin Pop
                                     " node disk replacement")
3531 a9e0c397 Iustin Pop
        self.tgt_node = instance.primary_node
3532 cff90b79 Iustin Pop
        self.oth_node = instance.secondary_nodes[0]
3533 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_SEC:
3534 a9e0c397 Iustin Pop
        self.new_node = remote_node # this can be None, in which case
3535 a9e0c397 Iustin Pop
                                    # we don't change the secondary
3536 a9e0c397 Iustin Pop
        self.tgt_node = instance.secondary_nodes[0]
3537 cff90b79 Iustin Pop
        self.oth_node = instance.primary_node
3538 a9e0c397 Iustin Pop
      else:
3539 a9e0c397 Iustin Pop
        raise errors.ProgrammerError("Unhandled disk replace mode")
3540 a9e0c397 Iustin Pop
3541 a9e0c397 Iustin Pop
    for name in self.op.disks:
3542 a9e0c397 Iustin Pop
      if instance.FindDisk(name) is None:
3543 a9e0c397 Iustin Pop
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3544 a9e0c397 Iustin Pop
                                   (name, instance.name))
3545 a8083063 Iustin Pop
    self.op.remote_node = remote_node
3546 a8083063 Iustin Pop
3547 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
3548 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
3549 a9e0c397 Iustin Pop

3550 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3551 a9e0c397 Iustin Pop
      - for each disk to be replaced:
3552 a9e0c397 Iustin Pop
        - create new LVs on the target node with unique names
3553 a9e0c397 Iustin Pop
        - detach old LVs from the drbd device
3554 a9e0c397 Iustin Pop
        - rename old LVs to name_replaced.<time_t>
3555 a9e0c397 Iustin Pop
        - rename new LVs to old LVs
3556 a9e0c397 Iustin Pop
        - attach the new LVs (with the old names now) to the drbd device
3557 a9e0c397 Iustin Pop
      - wait for sync across all devices
3558 a9e0c397 Iustin Pop
      - for each modified disk:
3559 a9e0c397 Iustin Pop
        - remove old LVs (which have the name name_replaced.<time_t>)
3560 a9e0c397 Iustin Pop

3561 a9e0c397 Iustin Pop
    Failures are not very well handled.
3562 cff90b79 Iustin Pop

3563 a9e0c397 Iustin Pop
    """
3564 cff90b79 Iustin Pop
    steps_total = 6
3565 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3566 a9e0c397 Iustin Pop
    instance = self.instance
3567 a9e0c397 Iustin Pop
    iv_names = {}
3568 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3569 a9e0c397 Iustin Pop
    # start of work
3570 a9e0c397 Iustin Pop
    cfg = self.cfg
3571 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
3572 cff90b79 Iustin Pop
    oth_node = self.oth_node
3573 cff90b79 Iustin Pop
3574 cff90b79 Iustin Pop
    # Step: check device activation
3575 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3576 cff90b79 Iustin Pop
    info("checking volume groups")
3577 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
3578 cff90b79 Iustin Pop
    results = rpc.call_vg_list([oth_node, tgt_node])
3579 cff90b79 Iustin Pop
    if not results:
3580 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3581 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
3582 cff90b79 Iustin Pop
      res = results.get(node, False)
3583 cff90b79 Iustin Pop
      if not res or my_vg not in res:
3584 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3585 cff90b79 Iustin Pop
                                 (my_vg, node))
3586 cff90b79 Iustin Pop
    for dev in instance.disks:
3587 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3588 cff90b79 Iustin Pop
        continue
3589 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
3590 cff90b79 Iustin Pop
        info("checking %s on %s" % (dev.iv_name, node))
3591 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
3592 cff90b79 Iustin Pop
        if not rpc.call_blockdev_find(node, dev):
3593 cff90b79 Iustin Pop
          raise errors.OpExecError("Can't find device %s on node %s" %
3594 cff90b79 Iustin Pop
                                   (dev.iv_name, node))
3595 cff90b79 Iustin Pop
3596 cff90b79 Iustin Pop
    # Step: check other node consistency
3597 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3598 cff90b79 Iustin Pop
    for dev in instance.disks:
3599 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3600 cff90b79 Iustin Pop
        continue
3601 cff90b79 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
3602 cff90b79 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
3603 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
3604 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
3605 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
3606 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
3607 cff90b79 Iustin Pop
3608 cff90b79 Iustin Pop
    # Step: create new storage
3609 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3610 a9e0c397 Iustin Pop
    for dev in instance.disks:
3611 a9e0c397 Iustin Pop
      if not dev.iv_name in self.op.disks:
3612 a9e0c397 Iustin Pop
        continue
3613 a9e0c397 Iustin Pop
      size = dev.size
3614 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
3615 a9e0c397 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3616 a9e0c397 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3617 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3618 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
3619 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3620 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
3621 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
3622 a9e0c397 Iustin Pop
      old_lvs = dev.children
3623 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
3624 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
3625 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
3626 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3627 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3628 a9e0c397 Iustin Pop
      # are talking about the secondary node
3629 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
3630 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
3631 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3632 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3633 a9e0c397 Iustin Pop
                                   " node '%s'" %
3634 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
3635 a9e0c397 Iustin Pop
3636 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
3637 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
3638 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
3639 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
3640 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
3641 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
3642 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
3643 cff90b79 Iustin Pop
      #dev.children = []
3644 cff90b79 Iustin Pop
      #cfg.Update(instance)
3645 a9e0c397 Iustin Pop
3646 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
3647 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
3648 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
3649 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
3650 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
3651 cff90b79 Iustin Pop
3652 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
3653 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
3654 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
3655 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
3656 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
3657 cff90b79 Iustin Pop
      rlist = []
3658 cff90b79 Iustin Pop
      for to_ren in old_lvs:
3659 cff90b79 Iustin Pop
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
3660 cff90b79 Iustin Pop
        if find_res is not None: # device exists
3661 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
3662 cff90b79 Iustin Pop
3663 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
3664 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3665 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
3666 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
3667 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
3668 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
3669 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3670 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
3671 cff90b79 Iustin Pop
3672 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
3673 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
3674 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
3675 a9e0c397 Iustin Pop
3676 cff90b79 Iustin Pop
      for disk in old_lvs:
3677 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
3678 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
3679 a9e0c397 Iustin Pop
3680 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
3681 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
3682 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
3683 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
3684 a9e0c397 Iustin Pop
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
3685 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
3686 cff90b79 Iustin Pop
                    " logical volumes")
3687 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
3688 a9e0c397 Iustin Pop
3689 a9e0c397 Iustin Pop
      dev.children = new_lvs
3690 a9e0c397 Iustin Pop
      cfg.Update(instance)
3691 a9e0c397 Iustin Pop
3692 cff90b79 Iustin Pop
    # Step: wait for sync
3693 a9e0c397 Iustin Pop
3694 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3695 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3696 a9e0c397 Iustin Pop
    # return value
3697 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3698 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3699 a9e0c397 Iustin Pop
3700 a9e0c397 Iustin Pop
    # so check manually all the devices
3701 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3702 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3703 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3704 a9e0c397 Iustin Pop
      if is_degr:
3705 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3706 a9e0c397 Iustin Pop
3707 cff90b79 Iustin Pop
    # Step: remove old storage
3708 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3709 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3710 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
3711 a9e0c397 Iustin Pop
      for lv in old_lvs:
3712 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
3713 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(tgt_node, lv):
3714 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
3715 a9e0c397 Iustin Pop
          continue
3716 a9e0c397 Iustin Pop
3717 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
3718 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
3719 a9e0c397 Iustin Pop

3720 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3721 a9e0c397 Iustin Pop
      - for all disks of the instance:
3722 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
3723 a9e0c397 Iustin Pop
        - shut down the drbd device on the old secondary
3724 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
3725 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
3726 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
3727 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
3728 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
3729 a9e0c397 Iustin Pop
          not network enabled
3730 a9e0c397 Iustin Pop
      - wait for sync across all devices
3731 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
3732 a9e0c397 Iustin Pop

3733 a9e0c397 Iustin Pop
    Failures are not very well handled.
3734 0834c866 Iustin Pop

3735 a9e0c397 Iustin Pop
    """
3736 0834c866 Iustin Pop
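    # the replacement runs through the six user-visible steps logged below;
    # as noted in the docstring, failures are not rolled back automatically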
    steps_total = 6
3737 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3738 a9e0c397 Iustin Pop
    instance = self.instance
3739 a9e0c397 Iustin Pop
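    # iv_names maps each disk's iv_name (e.g. "sda") to the disk object and
    # the list of LVs backing it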
    iv_names = {}
3740 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3741 a9e0c397 Iustin Pop
    # start of work
3742 a9e0c397 Iustin Pop
    cfg = self.cfg
3743 a9e0c397 Iustin Pop
    old_node = self.tgt_node
3744 a9e0c397 Iustin Pop
    new_node = self.new_node
3745 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
3746 0834c866 Iustin Pop
3747 0834c866 Iustin Pop
    # Step: check device activation
3748 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3749 0834c866 Iustin Pop
    info("checking volume groups")
3750 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
3751 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
3752 0834c866 Iustin Pop
    if not results:
3753 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3754 0834c866 Iustin Pop
    for node in pri_node, new_node:
3755 0834c866 Iustin Pop
      res = results.get(node, False)
3756 0834c866 Iustin Pop
      if not res or my_vg not in res:
3757 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3758 0834c866 Iustin Pop
                                 (my_vg, node))
3759 0834c866 Iustin Pop
    for dev in instance.disks:
3760 0834c866 Iustin Pop
      if dev.iv_name not in self.op.disks:
3761 0834c866 Iustin Pop
        continue
3762 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
3763 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3764 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3765 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
3766 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
3767 0834c866 Iustin Pop
3768 0834c866 Iustin Pop
    # Step: check other node consistency
3769 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3770 0834c866 Iustin Pop
    for dev in instance.disks:
3771 0834c866 Iustin Pop
      if dev.iv_name not in self.op.disks:
3772 0834c866 Iustin Pop
        continue
3773 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
3774 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
3775 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
3776 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
3777 0834c866 Iustin Pop
                                 pri_node)
3778 0834c866 Iustin Pop
3779 0834c866 Iustin Pop
    # Step: create new storage
3780 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3781 a9e0c397 Iustin Pop
    for dev in instance.disks:
3782 a9e0c397 Iustin Pop
      size = dev.size
3783 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
3784 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3785 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3786 a9e0c397 Iustin Pop
      # are talking about the secondary node
3787 a9e0c397 Iustin Pop
      for new_lv in dev.children:
3788 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
3789 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3790 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3791 a9e0c397 Iustin Pop
                                   " node '%s'" %
3792 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
3793 a9e0c397 Iustin Pop
3794 0834c866 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children)
3795 0834c866 Iustin Pop
3796 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
3797 0834c866 Iustin Pop
    for dev in instance.disks:
3798 0834c866 Iustin Pop
      size = dev.size
3799 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
3800 a9e0c397 Iustin Pop
      # create new devices on new_node
3801 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
3802 a9e0c397 Iustin Pop
                              logical_id=(pri_node, new_node,
3803 a9e0c397 Iustin Pop
                                          dev.logical_id[2]),
3804 a9e0c397 Iustin Pop
                              children=dev.children)
3805 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
3806 3f78eef2 Iustin Pop
                                        new_drbd, False,
3807 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
3808 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
3809 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
3810 a9e0c397 Iustin Pop
3811 0834c866 Iustin Pop
    for dev in instance.disks:
3812 a9e0c397 Iustin Pop
      # we have new devices, shut down the drbd on the old secondary
3813 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
3814 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
3815 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
3816 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
3817 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
3818 a9e0c397 Iustin Pop
3819 642445d9 Iustin Pop
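    # with the new DRBDs created and the old secondary's shut down, switch
    # the primary's devices to standalone so they can be re-attached to the
    # new secondary below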
    info("detaching primary drbds from the network (=> standalone)")
3820 642445d9 Iustin Pop
    done = 0
3821 642445d9 Iustin Pop
    for dev in instance.disks:
3822 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3823 642445d9 Iustin Pop
      # set the physical (unique in bdev terms) id to None, meaning
3824 642445d9 Iustin Pop
      # detach from network
3825 642445d9 Iustin Pop
      dev.physical_id = (None,) * len(dev.physical_id)
3826 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
3827 642445d9 Iustin Pop
      # standalone state
3828 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
3829 642445d9 Iustin Pop
        done += 1
3830 642445d9 Iustin Pop
      else:
3831 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
3832 642445d9 Iustin Pop
                dev.iv_name)
3833 642445d9 Iustin Pop
3834 642445d9 Iustin Pop
    if not done:
3835 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
3836 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
3837 642445d9 Iustin Pop
3838 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
3839 642445d9 Iustin Pop
    # the instance to point to the new secondary
3840 642445d9 Iustin Pop
    info("updating instance configuration")
3841 642445d9 Iustin Pop
    for dev in instance.disks:
3842 642445d9 Iustin Pop
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
3843 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3844 642445d9 Iustin Pop
    cfg.Update(instance)
3845 a9e0c397 Iustin Pop
3846 642445d9 Iustin Pop
    # and now perform the drbd attach
3847 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
3848 642445d9 Iustin Pop
    failures = []
3849 642445d9 Iustin Pop
    for dev in instance.disks:
3850 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
3851 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
3852 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
3853 642445d9 Iustin Pop
      # is correct
3854 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3855 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3856 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
3857 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
3858 a9e0c397 Iustin Pop
3859 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3860 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3861 a9e0c397 Iustin Pop
    # return value
3862 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3863 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3864 a9e0c397 Iustin Pop
3865 a9e0c397 Iustin Pop
    # so check manually all the devices
3866 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3867 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3868 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
3869 a9e0c397 Iustin Pop
      if is_degr:
3870 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3871 a9e0c397 Iustin Pop
3872 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3873 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3874 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
3875 a9e0c397 Iustin Pop
      for lv in old_lvs:
3876 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
3877 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
3878 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
3879 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
3880 a9e0c397 Iustin Pop
3881 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
3882 a9e0c397 Iustin Pop
    """Execute disk replacement.
3883 a9e0c397 Iustin Pop

3884 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
3885 a9e0c397 Iustin Pop

3886 a9e0c397 Iustin Pop
    """
3887 a9e0c397 Iustin Pop
    instance = self.instance
3888 22985314 Guido Trotter
3889 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
3890 22985314 Guido Trotter
    if instance.status == "down":
3891 22985314 Guido Trotter
      op = opcodes.OpActivateInstanceDisks(instance_name=instance.name)
3892 22985314 Guido Trotter
      self.proc.ChainOpCode(op)
3893 22985314 Guido Trotter
3894 abdf0113 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3895 a9e0c397 Iustin Pop
      if self.op.remote_node is None:
3896 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
3897 a9e0c397 Iustin Pop
      else:
3898 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
3899 a9e0c397 Iustin Pop
    else:
3900 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
3901 22985314 Guido Trotter
3902 22985314 Guido Trotter
    ret = fn(feedback_fn)
3903 22985314 Guido Trotter
3904 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
3905 22985314 Guido Trotter
    if instance.status == "down":
3906 22985314 Guido Trotter
      op = opcodes.OpDeactivateInstanceDisks(instance_name=instance.name)
3907 22985314 Guido Trotter
      self.proc.ChainOpCode(op)
3908 22985314 Guido Trotter
3909 22985314 Guido Trotter
    return ret
3910 a9e0c397 Iustin Pop
3911 a8083063 Iustin Pop
3912 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
3913 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
3914 8729e0d7 Iustin Pop

3915 8729e0d7 Iustin Pop
  """
3916 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
3917 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3918 8729e0d7 Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount"]
3919 8729e0d7 Iustin Pop
3920 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
3921 8729e0d7 Iustin Pop
    """Build hooks env.
3922 8729e0d7 Iustin Pop

3923 8729e0d7 Iustin Pop
    This runs on the master and the primary node.
3924 8729e0d7 Iustin Pop

3925 8729e0d7 Iustin Pop
    """
3926 8729e0d7 Iustin Pop
    env = {
3927 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
3928 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
3929 8729e0d7 Iustin Pop
      }
3930 8729e0d7 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3931 8729e0d7 Iustin Pop
    nl = [
3932 8729e0d7 Iustin Pop
      self.sstore.GetMasterNode(),
3933 8729e0d7 Iustin Pop
      self.instance.primary_node,
3934 8729e0d7 Iustin Pop
      ]
3935 8729e0d7 Iustin Pop
    return env, nl, nl
3936 8729e0d7 Iustin Pop
3937 8729e0d7 Iustin Pop
  def CheckPrereq(self):
3938 8729e0d7 Iustin Pop
    """Check prerequisites.
3939 8729e0d7 Iustin Pop

3940 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
3941 8729e0d7 Iustin Pop

3942 8729e0d7 Iustin Pop
    """
3943 8729e0d7 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3944 8729e0d7 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3945 8729e0d7 Iustin Pop
    if instance is None:
3946 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3947 8729e0d7 Iustin Pop
                                 self.op.instance_name)
3948 8729e0d7 Iustin Pop
    self.instance = instance
3949 8729e0d7 Iustin Pop
    self.op.instance_name = instance.name
3950 8729e0d7 Iustin Pop
3951 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
3952 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
3953 8729e0d7 Iustin Pop
                                 " growing.")
3954 8729e0d7 Iustin Pop
3955 8729e0d7 Iustin Pop
    if instance.FindDisk(self.op.disk) is None:
3956 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3957 c7cdfc90 Iustin Pop
                                 (self.op.disk, instance.name))
3958 8729e0d7 Iustin Pop
3959 8729e0d7 Iustin Pop
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
3960 8729e0d7 Iustin Pop
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3961 8729e0d7 Iustin Pop
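    # verify that each node holding the disk has enough free space in the
    # volume group for the requested growth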
    for node in nodenames:
3962 8729e0d7 Iustin Pop
      info = nodeinfo.get(node, None)
3963 8729e0d7 Iustin Pop
      if not info:
3964 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
3965 8729e0d7 Iustin Pop
                                   " from node '%s'" % node)
3966 8729e0d7 Iustin Pop
      vg_free = info.get('vg_free', None)
3967 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
3968 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
3969 8729e0d7 Iustin Pop
                                   " node %s" % node)
3970 8729e0d7 Iustin Pop
      if self.op.amount > info['vg_free']:
3971 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
3972 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
3973 8729e0d7 Iustin Pop
                                   (node, info['vg_free'], self.op.amount))
3974 8729e0d7 Iustin Pop
3975 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
3976 8729e0d7 Iustin Pop
    """Execute disk grow.
3977 8729e0d7 Iustin Pop

3978 8729e0d7 Iustin Pop
    """
3979 8729e0d7 Iustin Pop
    instance = self.instance
3980 8729e0d7 Iustin Pop
    disk = instance.FindDisk(self.op.disk)
3981 8729e0d7 Iustin Pop
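    # grow the disk on every node that holds it, secondaries first and the
    # primary last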
    for node in (instance.secondary_nodes + (instance.primary_node,)):
3982 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
3983 8729e0d7 Iustin Pop
      result = rpc.call_blockdev_grow(node, disk, self.op.amount)
3984 8729e0d7 Iustin Pop
      if not result or not isinstance(result, tuple) or len(result) != 2:
3985 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s" % node)
3986 8729e0d7 Iustin Pop
      elif not result[0]:
3987 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s: %s" %
3988 8729e0d7 Iustin Pop
                                 (node, result[1]))
3989 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
3990 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
3991 8729e0d7 Iustin Pop
    return
3992 8729e0d7 Iustin Pop
3993 8729e0d7 Iustin Pop
3994 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
3995 a8083063 Iustin Pop
  """Query runtime instance data.
3996 a8083063 Iustin Pop

3997 a8083063 Iustin Pop
  """
3998 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
3999 a8083063 Iustin Pop
4000 a8083063 Iustin Pop
  def CheckPrereq(self):
4001 a8083063 Iustin Pop
    """Check prerequisites.
4002 a8083063 Iustin Pop

4003 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
4004 a8083063 Iustin Pop

4005 a8083063 Iustin Pop
    """
4006 a8083063 Iustin Pop
    if not isinstance(self.op.instances, list):
4007 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'")
4008 a8083063 Iustin Pop
    if self.op.instances:
4009 a8083063 Iustin Pop
      self.wanted_instances = []
4010 a8083063 Iustin Pop
      names = self.op.instances
4011 a8083063 Iustin Pop
      for name in names:
4012 a8083063 Iustin Pop
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
4013 a8083063 Iustin Pop
        if instance is None:
4014 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("No such instance name '%s'" % name)
4015 515207af Guido Trotter
        self.wanted_instances.append(instance)
4016 a8083063 Iustin Pop
    else:
4017 a8083063 Iustin Pop
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
4018 a8083063 Iustin Pop
                               in self.cfg.GetInstanceList()]
4019 a8083063 Iustin Pop
    return
4020 a8083063 Iustin Pop
4021 a8083063 Iustin Pop
4022 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
4023 a8083063 Iustin Pop
    """Compute block device status.
4024 a8083063 Iustin Pop

4025 a8083063 Iustin Pop
    """
4026 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
4027 a8083063 Iustin Pop
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
4028 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
4029 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
4030 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
4031 a8083063 Iustin Pop
        snode = dev.logical_id[1]
4032 a8083063 Iustin Pop
      else:
4033 a8083063 Iustin Pop
        snode = dev.logical_id[0]
4034 a8083063 Iustin Pop
4035 a8083063 Iustin Pop
    if snode:
4036 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
4037 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
4038 a8083063 Iustin Pop
    else:
4039 a8083063 Iustin Pop
      dev_sstatus = None
4040 a8083063 Iustin Pop
4041 a8083063 Iustin Pop
    if dev.children:
4042 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
4043 a8083063 Iustin Pop
                      for child in dev.children]
4044 a8083063 Iustin Pop
    else:
4045 a8083063 Iustin Pop
      dev_children = []
4046 a8083063 Iustin Pop
4047 a8083063 Iustin Pop
    data = {
4048 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
4049 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
4050 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
4051 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
4052 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
4053 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
4054 a8083063 Iustin Pop
      "children": dev_children,
4055 a8083063 Iustin Pop
      }
4056 a8083063 Iustin Pop
4057 a8083063 Iustin Pop
    return data
4058 a8083063 Iustin Pop
4059 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4060 a8083063 Iustin Pop
    """Gather and return data"""
4061 a8083063 Iustin Pop
    result = {}
4062 a8083063 Iustin Pop
    for instance in self.wanted_instances:
4063 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
4064 a8083063 Iustin Pop
                                                instance.name)
4065 a8083063 Iustin Pop
      if remote_info and "state" in remote_info:
4066 a8083063 Iustin Pop
        remote_state = "up"
4067 a8083063 Iustin Pop
      else:
4068 a8083063 Iustin Pop
        remote_state = "down"
4069 a8083063 Iustin Pop
      if instance.status == "down":
4070 a8083063 Iustin Pop
        config_state = "down"
4071 a8083063 Iustin Pop
      else:
4072 a8083063 Iustin Pop
        config_state = "up"
4073 a8083063 Iustin Pop
4074 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
4075 a8083063 Iustin Pop
               for device in instance.disks]
4076 a8083063 Iustin Pop
4077 a8083063 Iustin Pop
      idict = {
4078 a8083063 Iustin Pop
        "name": instance.name,
4079 a8083063 Iustin Pop
        "config_state": config_state,
4080 a8083063 Iustin Pop
        "run_state": remote_state,
4081 a8083063 Iustin Pop
        "pnode": instance.primary_node,
4082 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
4083 a8083063 Iustin Pop
        "os": instance.os,
4084 a8083063 Iustin Pop
        "memory": instance.memory,
4085 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4086 a8083063 Iustin Pop
        "disks": disks,
4087 f55ff7ec Iustin Pop
        "vcpus": instance.vcpus,
4088 a8083063 Iustin Pop
        }
4089 a8083063 Iustin Pop
4090 a8340917 Iustin Pop
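      # hypervisor-specific parameters are only reported for the hypervisor
      # in use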
      htkind = self.sstore.GetHypervisorType()
4091 a8340917 Iustin Pop
      if htkind == constants.HT_XEN_PVM30:
4092 a8340917 Iustin Pop
        idict["kernel_path"] = instance.kernel_path
4093 a8340917 Iustin Pop
        idict["initrd_path"] = instance.initrd_path
4094 a8340917 Iustin Pop
4095 a8340917 Iustin Pop
      if htkind == constants.HT_XEN_HVM31:
4096 a8340917 Iustin Pop
        idict["hvm_boot_order"] = instance.hvm_boot_order
4097 a8340917 Iustin Pop
        idict["hvm_acpi"] = instance.hvm_acpi
4098 a8340917 Iustin Pop
        idict["hvm_pae"] = instance.hvm_pae
4099 a8340917 Iustin Pop
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
4100 a8340917 Iustin Pop
4101 a8340917 Iustin Pop
      if htkind in constants.HTS_REQ_PORT:
4102 a8340917 Iustin Pop
        idict["vnc_bind_address"] = instance.vnc_bind_address
4103 a8340917 Iustin Pop
        idict["network_port"] = instance.network_port
4104 a8340917 Iustin Pop
4105 a8083063 Iustin Pop
      result[instance.name] = idict
4106 a8083063 Iustin Pop
4107 a8083063 Iustin Pop
    return result
4108 a8083063 Iustin Pop
4109 a8083063 Iustin Pop
4110 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4111 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4112 a8083063 Iustin Pop

4113 a8083063 Iustin Pop
  """
4114 a8083063 Iustin Pop
  HPATH = "instance-modify"
4115 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4116 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4117 1a5c7281 Guido Trotter
  REQ_BGL = False
4118 1a5c7281 Guido Trotter
4119 1a5c7281 Guido Trotter
  def ExpandNames(self):
4120 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
4121 a8083063 Iustin Pop
4122 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4123 a8083063 Iustin Pop
    """Build hooks env.
4124 a8083063 Iustin Pop

4125 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4126 a8083063 Iustin Pop

4127 a8083063 Iustin Pop
    """
4128 396e1b78 Michael Hanselmann
    args = dict()
4129 a8083063 Iustin Pop
    if self.mem:
4130 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
4131 a8083063 Iustin Pop
    if self.vcpus:
4132 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
4133 ef756965 Iustin Pop
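    # only the first NIC can be changed by this LU, so the override list
    # holds a single (ip, bridge, mac) entry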
    if self.do_ip or self.do_bridge or self.mac:
4134 396e1b78 Michael Hanselmann
      if self.do_ip:
4135 396e1b78 Michael Hanselmann
        ip = self.ip
4136 396e1b78 Michael Hanselmann
      else:
4137 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4138 396e1b78 Michael Hanselmann
      if self.bridge:
4139 396e1b78 Michael Hanselmann
        bridge = self.bridge
4140 396e1b78 Michael Hanselmann
      else:
4141 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4142 ef756965 Iustin Pop
      if self.mac:
4143 ef756965 Iustin Pop
        mac = self.mac
4144 ef756965 Iustin Pop
      else:
4145 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4146 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4147 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
4148 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
4149 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4150 a8083063 Iustin Pop
    return env, nl, nl
4151 a8083063 Iustin Pop
4152 a8083063 Iustin Pop
  def CheckPrereq(self):
4153 a8083063 Iustin Pop
    """Check prerequisites.
4154 a8083063 Iustin Pop

4155 a8083063 Iustin Pop
    This checks the validity of the submitted parameters.
4156 a8083063 Iustin Pop

4157 a8083063 Iustin Pop
    """
4158 1a5c7281 Guido Trotter
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
4159 1a5c7281 Guido Trotter
    # a separate CheckArguments function, if we implement one, so the operation
4160 1a5c7281 Guido Trotter
    # can be aborted without waiting for any lock, should it have an error...
4161 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
4162 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
4163 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4164 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4165 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4166 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4167 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4168 25c5878d Alexander Schreiber
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
4169 31a853d2 Iustin Pop
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
4170 31a853d2 Iustin Pop
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
4171 31a853d2 Iustin Pop
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
4172 31a853d2 Iustin Pop
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
4173 31a853d2 Iustin Pop
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
4174 31a853d2 Iustin Pop
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
4175 31a853d2 Iustin Pop
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
4176 31a853d2 Iustin Pop
                 self.vnc_bind_address]
4177 31a853d2 Iustin Pop
    if all_parms.count(None) == len(all_parms):
4178 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4179 a8083063 Iustin Pop
    if self.mem is not None:
4180 a8083063 Iustin Pop
      try:
4181 a8083063 Iustin Pop
        self.mem = int(self.mem)
4182 a8083063 Iustin Pop
      except ValueError, err:
4183 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
4184 a8083063 Iustin Pop
    if self.vcpus is not None:
4185 a8083063 Iustin Pop
      try:
4186 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
4187 a8083063 Iustin Pop
      except ValueError, err:
4188 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
4189 a8083063 Iustin Pop
    if self.ip is not None:
4190 a8083063 Iustin Pop
      self.do_ip = True
4191 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4192 a8083063 Iustin Pop
        self.ip = None
4193 a8083063 Iustin Pop
      else:
4194 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4195 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4196 a8083063 Iustin Pop
    else:
4197 a8083063 Iustin Pop
      self.do_ip = False
4198 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4199 1862d460 Alexander Schreiber
    if self.mac is not None:
4200 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4201 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4202 1862d460 Alexander Schreiber
                                   self.mac)
4203 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4204 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4205 a8083063 Iustin Pop
4206 973d7867 Iustin Pop
    if self.kernel_path is not None:
4207 973d7867 Iustin Pop
      self.do_kernel_path = True
4208 973d7867 Iustin Pop
      if self.kernel_path == constants.VALUE_NONE:
4209 973d7867 Iustin Pop
        raise errors.OpPrereqError("Can't set instance to no kernel")
4210 973d7867 Iustin Pop
4211 973d7867 Iustin Pop
      if self.kernel_path != constants.VALUE_DEFAULT:
4212 973d7867 Iustin Pop
        if not os.path.isabs(self.kernel_path):
4213 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The kernel path must be an absolute"
4214 973d7867 Iustin Pop
                                    " filename")
4215 8cafeb26 Iustin Pop
    else:
4216 8cafeb26 Iustin Pop
      self.do_kernel_path = False
4217 973d7867 Iustin Pop
4218 973d7867 Iustin Pop
    if self.initrd_path is not None:
4219 973d7867 Iustin Pop
      self.do_initrd_path = True
4220 973d7867 Iustin Pop
      if self.initrd_path not in (constants.VALUE_NONE,
4221 973d7867 Iustin Pop
                                  constants.VALUE_DEFAULT):
4222 2bc22872 Iustin Pop
        if not os.path.isabs(self.initrd_path):
4223 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The initrd path must be an absolute"
4224 973d7867 Iustin Pop
                                    " filename")
4225 8cafeb26 Iustin Pop
    else:
4226 8cafeb26 Iustin Pop
      self.do_initrd_path = False
4227 973d7867 Iustin Pop
4228 25c5878d Alexander Schreiber
    # boot order verification
4229 25c5878d Alexander Schreiber
    if self.hvm_boot_order is not None:
4230 25c5878d Alexander Schreiber
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
4231 25c5878d Alexander Schreiber
        if len(self.hvm_boot_order.strip("acdn")) != 0:
4232 25c5878d Alexander Schreiber
          raise errors.OpPrereqError("invalid boot order specified,"
4233 25c5878d Alexander Schreiber
                                     " must be one or more of [acdn]"
4234 25c5878d Alexander Schreiber
                                     " or 'default'")
4235 25c5878d Alexander Schreiber
4236 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
4237 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
4238 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
4239 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
4240 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
4241 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4242 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
4243 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
4244 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
4245 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
4246 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4247 31a853d2 Iustin Pop
4248 31a853d2 Iustin Pop
    # vnc_bind_address verification
4249 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
4250 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
4251 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
4252 31a853d2 Iustin Pop
                                   " like a valid IP address" %
4253 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
4254 31a853d2 Iustin Pop
4255 1a5c7281 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4256 1a5c7281 Guido Trotter
    assert self.instance is not None, \
4257 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4258 a8083063 Iustin Pop
    return
4259 a8083063 Iustin Pop
4260 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4261 a8083063 Iustin Pop
    """Modifies an instance.
4262 a8083063 Iustin Pop

4263 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4264 a8083063 Iustin Pop
    """
4265 a8083063 Iustin Pop
    result = []
4266 a8083063 Iustin Pop
    instance = self.instance
4267 a8083063 Iustin Pop
    if self.mem:
4268 a8083063 Iustin Pop
      instance.memory = self.mem
4269 a8083063 Iustin Pop
      result.append(("mem", self.mem))
4270 a8083063 Iustin Pop
    if self.vcpus:
4271 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
4272 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
4273 a8083063 Iustin Pop
    if self.do_ip:
4274 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4275 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4276 a8083063 Iustin Pop
    if self.bridge:
4277 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4278 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4279 1862d460 Alexander Schreiber
    if self.mac:
4280 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4281 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4282 973d7867 Iustin Pop
    if self.do_kernel_path:
4283 973d7867 Iustin Pop
      instance.kernel_path = self.kernel_path
4284 973d7867 Iustin Pop
      result.append(("kernel_path", self.kernel_path))
4285 973d7867 Iustin Pop
    if self.do_initrd_path:
4286 973d7867 Iustin Pop
      instance.initrd_path = self.initrd_path
4287 973d7867 Iustin Pop
      result.append(("initrd_path", self.initrd_path))
4288 25c5878d Alexander Schreiber
    if self.hvm_boot_order:
4289 25c5878d Alexander Schreiber
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
4290 25c5878d Alexander Schreiber
        instance.hvm_boot_order = None
4291 25c5878d Alexander Schreiber
      else:
4292 25c5878d Alexander Schreiber
        instance.hvm_boot_order = self.hvm_boot_order
4293 25c5878d Alexander Schreiber
      result.append(("hvm_boot_order", self.hvm_boot_order))
4294 31a853d2 Iustin Pop
    if self.hvm_acpi:
4295 ec1ba002 Iustin Pop
      instance.hvm_acpi = self.hvm_acpi
4296 31a853d2 Iustin Pop
      result.append(("hvm_acpi", self.hvm_acpi))
4297 31a853d2 Iustin Pop
    if self.hvm_pae:
4298 ec1ba002 Iustin Pop
      instance.hvm_pae = self.hvm_pae
4299 31a853d2 Iustin Pop
      result.append(("hvm_pae", self.hvm_pae))
4300 31a853d2 Iustin Pop
    if self.hvm_cdrom_image_path:
4301 ec1ba002 Iustin Pop
      instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
4302 31a853d2 Iustin Pop
      result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
4303 31a853d2 Iustin Pop
    if self.vnc_bind_address:
4304 31a853d2 Iustin Pop
      instance.vnc_bind_address = self.vnc_bind_address
4305 31a853d2 Iustin Pop
      result.append(("vnc_bind_address", self.vnc_bind_address))
4306 a8083063 Iustin Pop
4307 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
4308 a8083063 Iustin Pop
4309 a8083063 Iustin Pop
    return result
4310 a8083063 Iustin Pop
4311 a8083063 Iustin Pop
4312 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
4313 a8083063 Iustin Pop
  """Query the exports list
4314 a8083063 Iustin Pop

4315 a8083063 Iustin Pop
  """
4316 a8083063 Iustin Pop
  _OP_REQP = []
4317 a8083063 Iustin Pop
4318 a8083063 Iustin Pop
  def CheckPrereq(self):
4319 a8083063 Iustin Pop
    """Check that the nodelist contains only existing nodes.
4320 a8083063 Iustin Pop

4321 a8083063 Iustin Pop
    """
4322 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
4323 a8083063 Iustin Pop
4324 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4325 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
4326 a8083063 Iustin Pop

4327 a8083063 Iustin Pop
    Returns:
4328 a8083063 Iustin Pop
      a dictionary with the structure node->(export-list)
4329 a8083063 Iustin Pop
      where export-list is a list of the instances exported on
4330 a8083063 Iustin Pop
      that node.
4331 a8083063 Iustin Pop

4332 a8083063 Iustin Pop
    """
4333 a7ba5e53 Iustin Pop
    return rpc.call_export_list(self.nodes)
4334 a8083063 Iustin Pop
4335 a8083063 Iustin Pop
4336 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
4337 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
4338 a8083063 Iustin Pop

4339 a8083063 Iustin Pop
  """
4340 a8083063 Iustin Pop
  HPATH = "instance-export"
4341 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4342 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
4343 a8083063 Iustin Pop
4344 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4345 a8083063 Iustin Pop
    """Build hooks env.
4346 a8083063 Iustin Pop

4347 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
4348 a8083063 Iustin Pop

4349 a8083063 Iustin Pop
    """
4350 a8083063 Iustin Pop
    env = {
4351 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
4352 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
4353 a8083063 Iustin Pop
      }
4354 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
4355 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
4356 a8083063 Iustin Pop
          self.op.target_node]
4357 a8083063 Iustin Pop
    return env, nl, nl
4358 a8083063 Iustin Pop
4359 a8083063 Iustin Pop
  def CheckPrereq(self):
4360 a8083063 Iustin Pop
    """Check prerequisites.
4361 a8083063 Iustin Pop

4362 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
4363 a8083063 Iustin Pop

4364 a8083063 Iustin Pop
    """
4365 a8083063 Iustin Pop
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4366 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
4367 a8083063 Iustin Pop
    if self.instance is None:
4368 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not found" %
4369 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4370 a8083063 Iustin Pop
4371 a8083063 Iustin Pop
    # node verification
4372 a8083063 Iustin Pop
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
4373 a8083063 Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
4374 a8083063 Iustin Pop
4375 a8083063 Iustin Pop
    if self.dst_node is None:
4376 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
4377 3ecf6786 Iustin Pop
                                 self.op.target_node)
4378 a8083063 Iustin Pop
    self.op.target_node = self.dst_node.name
4379 a8083063 Iustin Pop
4380 b6023d6c Manuel Franceschini
    # instance disk type verification
4381 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
4382 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
4383 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
4384 b6023d6c Manuel Franceschini
                                   " file-based disks")
4385 b6023d6c Manuel Franceschini
4386 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4387 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
4388 a8083063 Iustin Pop

4389 a8083063 Iustin Pop
    """
4390 a8083063 Iustin Pop
    instance = self.instance
4391 a8083063 Iustin Pop
    dst_node = self.dst_node
4392 a8083063 Iustin Pop
    src_node = instance.primary_node
4393 a8083063 Iustin Pop
    if self.op.shutdown:
4394 fb300fb7 Guido Trotter
      # shut down the instance, but not the disks
4395 fb300fb7 Guido Trotter
      if not rpc.call_instance_shutdown(src_node, instance):
4396 38206f3c Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
4397 38206f3c Iustin Pop
                                 (instance.name, src_node))
4398 a8083063 Iustin Pop
4399 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
4400 a8083063 Iustin Pop
4401 a8083063 Iustin Pop
    snap_disks = []
4402 a8083063 Iustin Pop
4403 a8083063 Iustin Pop
    try:
4404 a8083063 Iustin Pop
      for disk in instance.disks:
4405 a8083063 Iustin Pop
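        # only the first disk (sda) is snapshotted and exported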
        if disk.iv_name == "sda":
4406 a8083063 Iustin Pop
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
4407 a8083063 Iustin Pop
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
4408 a8083063 Iustin Pop
4409 a8083063 Iustin Pop
          if not new_dev_name:
4410 a8083063 Iustin Pop
            logger.Error("could not snapshot block device %s on node %s" %
4411 a8083063 Iustin Pop
                         (disk.logical_id[1], src_node))
4412 a8083063 Iustin Pop
          else:
4413 fe96220b Iustin Pop
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
4414 a8083063 Iustin Pop
                                      logical_id=(vgname, new_dev_name),
4415 a8083063 Iustin Pop
                                      physical_id=(vgname, new_dev_name),
4416 a8083063 Iustin Pop
                                      iv_name=disk.iv_name)
4417 a8083063 Iustin Pop
            snap_disks.append(new_dev)
4418 a8083063 Iustin Pop
4419 a8083063 Iustin Pop
    finally:
4420 fb300fb7 Guido Trotter
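      # restart the instance if we shut it down for the snapshot and the
      # configuration marks it as running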
      if self.op.shutdown and instance.status == "up":
4421 fb300fb7 Guido Trotter
        if not rpc.call_instance_start(src_node, instance, None):
4422 fb300fb7 Guido Trotter
          _ShutdownInstanceDisks(instance, self.cfg)
4423 fb300fb7 Guido Trotter
          raise errors.OpExecError("Could not start instance")
4424 a8083063 Iustin Pop
4425 a8083063 Iustin Pop
    # TODO: check for size
4426 a8083063 Iustin Pop
4427 a8083063 Iustin Pop
    for dev in snap_disks:
4428 16687b98 Manuel Franceschini
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
4429 16687b98 Manuel Franceschini
        logger.Error("could not export block device %s from node %s to node %s"
4430 16687b98 Manuel Franceschini
                     % (dev.logical_id[1], src_node, dst_node.name))
4431 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(src_node, dev):
4432 16687b98 Manuel Franceschini
        logger.Error("could not remove snapshot block device %s from node %s" %
4433 16687b98 Manuel Franceschini
                     (dev.logical_id[1], src_node))
4434 a8083063 Iustin Pop
4435 a8083063 Iustin Pop
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
4436 a8083063 Iustin Pop
      logger.Error("could not finalize export for instance %s on node %s" %
4437 a8083063 Iustin Pop
                   (instance.name, dst_node.name))
4438 a8083063 Iustin Pop
4439 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
4440 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
4441 a8083063 Iustin Pop
4442 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
4443 a8083063 Iustin Pop
    # if we proceed, the backup would be removed because OpQueryExports
4444 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
4445 a8083063 Iustin Pop
    if nodelist:
4446 a8083063 Iustin Pop
      op = opcodes.OpQueryExports(nodes=nodelist)
4447 5bfac263 Iustin Pop
      exportlist = self.proc.ChainOpCode(op)
4448 a8083063 Iustin Pop
      for node in exportlist:
4449 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
4450 a8083063 Iustin Pop
          if not rpc.call_export_remove(node, instance.name):
4451 a8083063 Iustin Pop
            logger.Error("could not remove older export for instance %s"
4452 a8083063 Iustin Pop
                         " on node %s" % (instance.name, node))
4453 5c947f38 Iustin Pop
4454 5c947f38 Iustin Pop
4455 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
4456 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
4457 9ac99fda Guido Trotter

4458 9ac99fda Guido Trotter
  """
4459 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
4460 9ac99fda Guido Trotter
4461 9ac99fda Guido Trotter
  def CheckPrereq(self):
4462 9ac99fda Guido Trotter
    """Check prerequisites.
4463 9ac99fda Guido Trotter
    """
4464 9ac99fda Guido Trotter
    pass
4465 9ac99fda Guido Trotter
4466 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
4467 9ac99fda Guido Trotter
    """Remove any export.
4468 9ac99fda Guido Trotter

4469 9ac99fda Guido Trotter
    """
4470 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4471 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
4472 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
4473 9ac99fda Guido Trotter
    fqdn_warn = False
4474 9ac99fda Guido Trotter
    if not instance_name:
4475 9ac99fda Guido Trotter
      fqdn_warn = True
4476 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
4477 9ac99fda Guido Trotter
4478 9ac99fda Guido Trotter
    op = opcodes.OpQueryExports(nodes=[])
4479 9ac99fda Guido Trotter
    exportlist = self.proc.ChainOpCode(op)
4480 9ac99fda Guido Trotter
    found = False
4481 9ac99fda Guido Trotter
    for node in exportlist:
4482 9ac99fda Guido Trotter
      if instance_name in exportlist[node]:
4483 9ac99fda Guido Trotter
        found = True
4484 9ac99fda Guido Trotter
        if not rpc.call_export_remove(node, instance_name):
4485 9ac99fda Guido Trotter
          logger.Error("could not remove export for instance %s"
4486 9ac99fda Guido Trotter
                       " on node %s" % (instance_name, node))
4487 9ac99fda Guido Trotter
4488 9ac99fda Guido Trotter
    if fqdn_warn and not found:
4489 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
4490 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
4491 9ac99fda Guido Trotter
                  " Domain Name.")
4492 9ac99fda Guido Trotter
4493 9ac99fda Guido Trotter
4494 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
4495 5c947f38 Iustin Pop
  """Generic tags LU.
4496 5c947f38 Iustin Pop

4497 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
4498 5c947f38 Iustin Pop

4499 5c947f38 Iustin Pop
  """
4500 5c947f38 Iustin Pop
  def CheckPrereq(self):
4501 5c947f38 Iustin Pop
    """Check prerequisites.
4502 5c947f38 Iustin Pop

4503 5c947f38 Iustin Pop
    """
4504 5c947f38 Iustin Pop
    if self.op.kind == constants.TAG_CLUSTER:
4505 5c947f38 Iustin Pop
      self.target = self.cfg.GetClusterInfo()
4506 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_NODE:
4507 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
4508 5c947f38 Iustin Pop
      if name is None:
4509 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
4510 3ecf6786 Iustin Pop
                                   (self.op.name,))
4511 5c947f38 Iustin Pop
      self.op.name = name
4512 5c947f38 Iustin Pop
      self.target = self.cfg.GetNodeInfo(name)
4513 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
4514 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
4515 5c947f38 Iustin Pop
      if name is None:
4516 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
4517 3ecf6786 Iustin Pop
                                   (self.op.name,))
4518 5c947f38 Iustin Pop
      self.op.name = name
4519 5c947f38 Iustin Pop
      self.target = self.cfg.GetInstanceInfo(name)
4520 5c947f38 Iustin Pop
    else:
4521 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
4522 3ecf6786 Iustin Pop
                                 str(self.op.kind))
4523 5c947f38 Iustin Pop
4524 5c947f38 Iustin Pop
4525 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
4526 5c947f38 Iustin Pop
  """Returns the tags of a given object.
4527 5c947f38 Iustin Pop

4528 5c947f38 Iustin Pop
  """
4529 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
4530 5c947f38 Iustin Pop
4531 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4532 5c947f38 Iustin Pop
    """Returns the tag list.
4533 5c947f38 Iustin Pop

4534 5c947f38 Iustin Pop
    """
4535 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
4536 5c947f38 Iustin Pop
4537 5c947f38 Iustin Pop
4538 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4539 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4540 73415719 Iustin Pop

4541 73415719 Iustin Pop
  """
4542 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4543 73415719 Iustin Pop
4544 73415719 Iustin Pop
  def CheckPrereq(self):
4545 73415719 Iustin Pop
    """Check prerequisites.
4546 73415719 Iustin Pop

4547 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4548 73415719 Iustin Pop

4549 73415719 Iustin Pop
    """
4550 73415719 Iustin Pop
    try:
4551 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4552 73415719 Iustin Pop
    except re.error, err:
4553 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4554 73415719 Iustin Pop
                                 (self.op.pattern, err))
4555 73415719 Iustin Pop
4556 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4557 73415719 Iustin Pop
    """Returns the tag list.
4558 73415719 Iustin Pop

4559 73415719 Iustin Pop
    """
4560 73415719 Iustin Pop
    cfg = self.cfg
4561 73415719 Iustin Pop
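    # build (path, object) pairs for the cluster, all instances and all nodes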
    tgts = [("/cluster", cfg.GetClusterInfo())]
4562 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4563 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4564 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4565 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4566 73415719 Iustin Pop
    results = []
4567 73415719 Iustin Pop
    for path, target in tgts:
4568 73415719 Iustin Pop
      for tag in target.GetTags():
4569 73415719 Iustin Pop
        if self.re.search(tag):
4570 73415719 Iustin Pop
          results.append((path, tag))
4571 73415719 Iustin Pop
    return results
4572 73415719 Iustin Pop
4573 73415719 Iustin Pop
4574 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4575 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4576 5c947f38 Iustin Pop

4577 5c947f38 Iustin Pop
  """
4578 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4579 5c947f38 Iustin Pop
4580 5c947f38 Iustin Pop
  def CheckPrereq(self):
4581 5c947f38 Iustin Pop
    """Check prerequisites.
4582 5c947f38 Iustin Pop

4583 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4584 5c947f38 Iustin Pop

4585 5c947f38 Iustin Pop
    """
4586 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4587 f27302fa Iustin Pop
    for tag in self.op.tags:
4588 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4589 5c947f38 Iustin Pop
4590 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4591 5c947f38 Iustin Pop
    """Sets the tag.
4592 5c947f38 Iustin Pop

4593 5c947f38 Iustin Pop
    """
4594 5c947f38 Iustin Pop
    try:
4595 f27302fa Iustin Pop
      for tag in self.op.tags:
4596 f27302fa Iustin Pop
        self.target.AddTag(tag)
4597 5c947f38 Iustin Pop
    except errors.TagError, err:
4598 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4599 5c947f38 Iustin Pop
    try:
4600 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4601 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4602 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4603 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4604 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4605 5c947f38 Iustin Pop
4606 5c947f38 Iustin Pop
4607 f27302fa Iustin Pop
class LUDelTags(TagsLU):
4608 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
4609 5c947f38 Iustin Pop

4610 5c947f38 Iustin Pop
  """
4611 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4612 5c947f38 Iustin Pop
4613 5c947f38 Iustin Pop
  def CheckPrereq(self):
4614 5c947f38 Iustin Pop
    """Check prerequisites.
4615 5c947f38 Iustin Pop

4616 5c947f38 Iustin Pop
    This checks that we have the given tag.
4617 5c947f38 Iustin Pop

4618 5c947f38 Iustin Pop
    """
4619 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4620 f27302fa Iustin Pop
    for tag in self.op.tags:
4621 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4622 f27302fa Iustin Pop
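    # every tag requested for removal must currently be set on the target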
    del_tags = frozenset(self.op.tags)
4623 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
4624 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
4625 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
4626 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
4627 f27302fa Iustin Pop
      diff_names.sort()
4628 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
4629 f27302fa Iustin Pop
                                 (",".join(diff_names)))
4630 5c947f38 Iustin Pop
4631 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4632 5c947f38 Iustin Pop
    """Remove the tag from the object.
4633 5c947f38 Iustin Pop

4634 5c947f38 Iustin Pop
    """
4635 f27302fa Iustin Pop
    for tag in self.op.tags:
4636 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
4637 5c947f38 Iustin Pop
    try:
4638 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4639 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4640 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4641 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4642 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4643 06009e27 Iustin Pop
4644 0eed6e61 Guido Trotter
4645 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
4646 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
4647 06009e27 Iustin Pop

4648 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
4649 06009e27 Iustin Pop
  time.
4650 06009e27 Iustin Pop

4651 06009e27 Iustin Pop
  """
4652 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
4653 fbe9022f Guido Trotter
  REQ_BGL = False
4654 06009e27 Iustin Pop
4655 fbe9022f Guido Trotter
  def ExpandNames(self):
4656 fbe9022f Guido Trotter
    """Expand names and set required locks.
4657 06009e27 Iustin Pop

4658 fbe9022f Guido Trotter
    This expands the node list, if any.
4659 06009e27 Iustin Pop

4660 06009e27 Iustin Pop
    """
4661 fbe9022f Guido Trotter
    self.needed_locks = {}
4662 06009e27 Iustin Pop
    if self.op.on_nodes:
4663 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
4664 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
4665 fbe9022f Guido Trotter
      # more information.
4666 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
4667 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
4668 fbe9022f Guido Trotter
4669 fbe9022f Guido Trotter
  def CheckPrereq(self):
4670 fbe9022f Guido Trotter
    """Check prerequisites.
4671 fbe9022f Guido Trotter

4672 fbe9022f Guido Trotter
    """
4673 06009e27 Iustin Pop
4674 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
4675 06009e27 Iustin Pop
    """Do the actual sleep.
4676 06009e27 Iustin Pop

4677 06009e27 Iustin Pop
    """
4678 06009e27 Iustin Pop
    if self.op.on_master:
4679 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
4680 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
4681 06009e27 Iustin Pop
    if self.op.on_nodes:
4682 06009e27 Iustin Pop
      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
4683 06009e27 Iustin Pop
      if not result:
4684 06009e27 Iustin Pop
        raise errors.OpExecError("Complete failure from rpc call")
4685 06009e27 Iustin Pop
      for node, node_result in result.items():
4686 06009e27 Iustin Pop
        if not node_result:
4687 06009e27 Iustin Pop
          raise errors.OpExecError("Failure during rpc call to node %s,"
4688 06009e27 Iustin Pop
                                   " result: %s" % (node, node_result))
4689 d61df03e Iustin Pop
4690 d61df03e Iustin Pop
4691 d1c2dd75 Iustin Pop
class IAllocator(object):
4692 d1c2dd75 Iustin Pop
  """IAllocator framework.
4693 d61df03e Iustin Pop

4694 d1c2dd75 Iustin Pop
  An IAllocator instance has four sets of attributes:
4695 d1c2dd75 Iustin Pop
    - cfg/sstore that are needed to query the cluster
4696 d1c2dd75 Iustin Pop
    - input data (all members of the _ALLO_KEYS or _RELO_KEYS class
      attribute, depending on the mode, are required)
4697 d1c2dd75 Iustin Pop
    - four buffer attributes (in_text, in_data, out_text, out_data),
      which represent the
4698 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
4699 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
4700 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
4701 d1c2dd75 Iustin Pop
      easy usage
4702 d61df03e Iustin Pop

4703 d61df03e Iustin Pop
  """
4704 29859cb7 Iustin Pop
  _ALLO_KEYS = [
4705 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
4706 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
4707 d1c2dd75 Iustin Pop
    ]
4708 29859cb7 Iustin Pop
  _RELO_KEYS = [
4709 29859cb7 Iustin Pop
    "relocate_from",
4710 29859cb7 Iustin Pop
    ]
4711 d1c2dd75 Iustin Pop
4712 29859cb7 Iustin Pop
  def __init__(self, cfg, sstore, mode, name, **kwargs):
4713 d1c2dd75 Iustin Pop
    self.cfg = cfg
4714 d1c2dd75 Iustin Pop
    self.sstore = sstore
4715 d1c2dd75 Iustin Pop
    # init buffer variables
4716 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
4717 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
4718 29859cb7 Iustin Pop
    self.mode = mode
4719 29859cb7 Iustin Pop
    self.name = name
4720 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
4721 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
4722 29859cb7 Iustin Pop
    self.relocate_from = None
4723 27579978 Iustin Pop
    # computed fields
4724 27579978 Iustin Pop
    self.required_nodes = None
4725 d1c2dd75 Iustin Pop
    # init result fields
4726 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
4727 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
4728 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
4729 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
4730 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
4731 29859cb7 Iustin Pop
    else:
4732 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
4733 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
4734 d1c2dd75 Iustin Pop
    for key in kwargs:
4735 29859cb7 Iustin Pop
      if key not in keyset:
4736 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
4737 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4738 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
4739 29859cb7 Iustin Pop
    for key in keyset:
4740 d1c2dd75 Iustin Pop
      if key not in kwargs:
4741 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
4742 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4743 d1c2dd75 Iustin Pop
    self._BuildInputData()
4744 d1c2dd75 Iustin Pop
4745 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
4746 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
4747 d1c2dd75 Iustin Pop

4748 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
4749 d1c2dd75 Iustin Pop

4750 d1c2dd75 Iustin Pop
    """
4751 d1c2dd75 Iustin Pop
    cfg = self.cfg
4752 d1c2dd75 Iustin Pop
    # cluster data
4753 d1c2dd75 Iustin Pop
    data = {
4754 d1c2dd75 Iustin Pop
      "version": 1,
4755 d1c2dd75 Iustin Pop
      "cluster_name": self.sstore.GetClusterName(),
4756 d1c2dd75 Iustin Pop
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
4757 6286519f Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
4758 d1c2dd75 Iustin Pop
      # we don't have job IDs
4759 d61df03e Iustin Pop
      }
4760 d61df03e Iustin Pop
4761 6286519f Iustin Pop
    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]
4762 6286519f Iustin Pop
4763 d1c2dd75 Iustin Pop
    # node data
4764 d1c2dd75 Iustin Pop
    node_results = {}
4765 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
4766 d1c2dd75 Iustin Pop
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
4767 d1c2dd75 Iustin Pop
    for nname in node_list:
4768 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
4769 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
4770 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
4771 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
4772 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
4773 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
4774 d1c2dd75 Iustin Pop
        if attr not in remote_info:
4775 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
4776 d1c2dd75 Iustin Pop
                                   (nname, attr))
4777 d1c2dd75 Iustin Pop
        try:
4778 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
4779 d1c2dd75 Iustin Pop
        except ValueError, err:
4780 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
4781 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
4782 6286519f Iustin Pop
      # compute memory used by primary instances
4783 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
4784 6286519f Iustin Pop
      for iinfo in i_list:
4785 6286519f Iustin Pop
        if iinfo.primary_node == nname:
4786 6286519f Iustin Pop
          i_p_mem += iinfo.memory
4787 6286519f Iustin Pop
          if iinfo.status == "up":
4788 6286519f Iustin Pop
            i_p_up_mem += iinfo.memory
4789 6286519f Iustin Pop
4790 b2662e7f Iustin Pop
      # assemble the per-node result for the allocator input
4791 d1c2dd75 Iustin Pop
      pnr = {
4792 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
4793 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
4794 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
4795 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
4796 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
4797 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
4798 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
4799 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
4800 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
4801 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
4802 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
4803 d1c2dd75 Iustin Pop
        }
4804 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
4805 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
4806 d1c2dd75 Iustin Pop
4807 d1c2dd75 Iustin Pop
    # instance data
4808 d1c2dd75 Iustin Pop
    instance_data = {}
4809 6286519f Iustin Pop
    for iinfo in i_list:
4810 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
4811 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
4812 d1c2dd75 Iustin Pop
      pir = {
4813 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
4814 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
4815 d1c2dd75 Iustin Pop
        "vcpus": iinfo.vcpus,
4816 d1c2dd75 Iustin Pop
        "memory": iinfo.memory,
4817 d1c2dd75 Iustin Pop
        "os": iinfo.os,
4818 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
4819 d1c2dd75 Iustin Pop
        "nics": nic_data,
4820 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
4821 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
4822 d1c2dd75 Iustin Pop
        }
4823 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
4824 d61df03e Iustin Pop
4825 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
4826 d61df03e Iustin Pop
4827 d1c2dd75 Iustin Pop
    self.in_data = data
4828 d61df03e Iustin Pop
4829 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
4830 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
4831 d61df03e Iustin Pop

4832 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
4833 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
4834 d61df03e Iustin Pop

4835 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
4836 d1c2dd75 Iustin Pop
    done.
4837 d61df03e Iustin Pop

4838 d1c2dd75 Iustin Pop
    """
4839 d1c2dd75 Iustin Pop
    data = self.in_data
4840 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
4841 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
4842 d1c2dd75 Iustin Pop
4843 d1c2dd75 Iustin Pop
    disk_space = _ComputeDiskSize(self.disk_template,
4844 d1c2dd75 Iustin Pop
                                  self.disks[0]["size"], self.disks[1]["size"])
4845 d1c2dd75 Iustin Pop
4846 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
4847 27579978 Iustin Pop
      self.required_nodes = 2
4848 27579978 Iustin Pop
    else:
4849 27579978 Iustin Pop
      self.required_nodes = 1
4850 d1c2dd75 Iustin Pop
    request = {
4851 d1c2dd75 Iustin Pop
      "type": "allocate",
4852 d1c2dd75 Iustin Pop
      "name": self.name,
4853 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
4854 d1c2dd75 Iustin Pop
      "tags": self.tags,
4855 d1c2dd75 Iustin Pop
      "os": self.os,
4856 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
4857 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
4858 d1c2dd75 Iustin Pop
      "disks": self.disks,
4859 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
4860 d1c2dd75 Iustin Pop
      "nics": self.nics,
4861 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
4862 d1c2dd75 Iustin Pop
      }
4863 d1c2dd75 Iustin Pop
    data["request"] = request
4864 298fe380 Iustin Pop
4865 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
4866 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
4867 298fe380 Iustin Pop

4868 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
4869 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
4870 d61df03e Iustin Pop

4871 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
4872 d1c2dd75 Iustin Pop
    done.
4873 d61df03e Iustin Pop

4874 d1c2dd75 Iustin Pop
    """
4875 27579978 Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.name)
4876 27579978 Iustin Pop
    if instance is None:
4877 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
4878 27579978 Iustin Pop
                                   " IAllocator" % self.name)
4879 27579978 Iustin Pop
4880 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
4881 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
4882 27579978 Iustin Pop
4883 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
4884 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
4885 2a139bb0 Iustin Pop
4886 27579978 Iustin Pop
    self.required_nodes = 1
4887 27579978 Iustin Pop
4888 27579978 Iustin Pop
    disk_space = _ComputeDiskSize(instance.disk_template,
4889 27579978 Iustin Pop
                                  instance.disks[0].size,
4890 27579978 Iustin Pop
                                  instance.disks[1].size)
4891 27579978 Iustin Pop
4892 d1c2dd75 Iustin Pop
    request = {
4893 2a139bb0 Iustin Pop
      "type": "relocate",
4894 d1c2dd75 Iustin Pop
      "name": self.name,
4895 27579978 Iustin Pop
      "disk_space_total": disk_space,
4896 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
4897 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
4898 d1c2dd75 Iustin Pop
      }
4899 27579978 Iustin Pop
    self.in_data["request"] = request
4900 d61df03e Iustin Pop
4901 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
4902 d1c2dd75 Iustin Pop
    """Build input data structures.
4903 d61df03e Iustin Pop

4904 d1c2dd75 Iustin Pop
    """
4905 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
4906 d61df03e Iustin Pop
4907 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
4908 d1c2dd75 Iustin Pop
      self._AddNewInstance()
4909 d1c2dd75 Iustin Pop
    else:
4910 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
4911 d61df03e Iustin Pop
4912 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
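    # Abridged sketch of the serialized text handed to the external script
    # at this point; the keys mirror the dicts built by _ComputeClusterData
    # and _AddNewInstance/_AddRelocateInstance, and all names and figures
    # here are made up:
    #
    #   {"version": 1,
    #    "cluster_name": "cluster.example.com",
    #    "cluster_tags": [],
    #    "hypervisor_type": "xen-3.0",
    #    "nodes": {"node1.example.com": {"total_memory": 4096, ...}},
    #    "instances": {"instance1.example.com": {"memory": 512, ...}},
    #    "request": {"type": "allocate", "required_nodes": 2, ...}}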
4913 d61df03e Iustin Pop
4914 8d528b7c Iustin Pop
  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
4915 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
4916 298fe380 Iustin Pop

4917 d1c2dd75 Iustin Pop
    """
4918 d1c2dd75 Iustin Pop
    data = self.in_text
4919 298fe380 Iustin Pop
4920 8d528b7c Iustin Pop
    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)
4921 298fe380 Iustin Pop
4922 8d528b7c Iustin Pop
    if not isinstance(result, tuple) or len(result) != 4:
4923 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
4924 8d528b7c Iustin Pop
4925 8d528b7c Iustin Pop
    rcode, stdout, stderr, fail = result
4926 8d528b7c Iustin Pop
4927 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
4928 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
4929 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
4930 38206f3c Iustin Pop
      raise errors.OpExecError("Instance allocator call failed: %s,"
4931 38206f3c Iustin Pop
                               " output: %s" % (fail, stdout+stderr))
4932 8d528b7c Iustin Pop
    self.out_text = stdout
4933 d1c2dd75 Iustin Pop
    if validate:
4934 d1c2dd75 Iustin Pop
      self._ValidateResult()
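    # The call_fn argument allows a different runner to be injected, for
    # example in tests; a minimal sketch (the success return code name and
    # the reply text below are assumptions, not taken from this module):
    #
    #   def fake_fn(node, name, idata):
    #     return (constants.IARUN_SUCCESS,
    #             '{"success": true, "info": "ok", "nodes": []}', "", None)
    #
    #   ial.Run("dummy-allocator", validate=True, call_fn=fake_fn)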
4935 298fe380 Iustin Pop
4936 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
4937 d1c2dd75 Iustin Pop
    """Process the allocator results.
4938 538475ca Iustin Pop

4939 d1c2dd75 Iustin Pop
    This will parse the allocator output and, if it is valid, save the
4940 d1c2dd75 Iustin Pop
    result in self.out_data and in the attributes success, info and nodes.
4941 538475ca Iustin Pop

4942 d1c2dd75 Iustin Pop
    """
4943 d1c2dd75 Iustin Pop
    try:
4944 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
4945 d1c2dd75 Iustin Pop
    except Exception, err:
4946 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
4947 d1c2dd75 Iustin Pop
4948 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
4949 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
4950 538475ca Iustin Pop
4951 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
4952 d1c2dd75 Iustin Pop
      if key not in rdict:
4953 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
4954 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
4955 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
4956 538475ca Iustin Pop
4957 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
4958 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
4959 d1c2dd75 Iustin Pop
                               " is not a list")
4960 d1c2dd75 Iustin Pop
    self.out_data = rdict
4961 538475ca Iustin Pop
4962 538475ca Iustin Pop
4963 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
4964 d61df03e Iustin Pop
  """Run allocator tests.
4965 d61df03e Iustin Pop

4966 d61df03e Iustin Pop
  This LU runs the allocator tests
4967 d61df03e Iustin Pop

4968 d61df03e Iustin Pop
  """
4969 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
4970 d61df03e Iustin Pop
4971 d61df03e Iustin Pop
  def CheckPrereq(self):
4972 d61df03e Iustin Pop
    """Check prerequisites.
4973 d61df03e Iustin Pop

4974 d61df03e Iustin Pop
    This checks the opcode parameters depending on the direction and mode
    of the test.
4975 d61df03e Iustin Pop

4976 d61df03e Iustin Pop
    """
4977 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
4978 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
4979 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
4980 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
4981 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
4982 d61df03e Iustin Pop
                                     attr)
4983 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
4984 d61df03e Iustin Pop
      if iname is not None:
4985 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
4986 d61df03e Iustin Pop
                                   iname)
4987 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
4988 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'")
4989 d61df03e Iustin Pop
      for row in self.op.nics:
4990 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
4991 d61df03e Iustin Pop
            "mac" not in row or
4992 d61df03e Iustin Pop
            "ip" not in row or
4993 d61df03e Iustin Pop
            "bridge" not in row):
4994 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
4995 d61df03e Iustin Pop
                                     " 'nics' parameter")
4996 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
4997 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'")
4998 298fe380 Iustin Pop
      if len(self.op.disks) != 2:
4999 298fe380 Iustin Pop
        raise errors.OpPrereqError("Only two-disk configurations supported")
5000 d61df03e Iustin Pop
      for row in self.op.disks:
5001 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
5002 d61df03e Iustin Pop
            "size" not in row or
5003 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
5004 d61df03e Iustin Pop
            "mode" not in row or
5005 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
5006 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
5007 d61df03e Iustin Pop
                                     " 'disks' parameter")
5008 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
5009 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
5010 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
5011 d61df03e Iustin Pop
      fname = self.cfg.ExpandInstanceName(self.op.name)
5012 d61df03e Iustin Pop
      if fname is None:
5013 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
5014 d61df03e Iustin Pop
                                   self.op.name)
5015 d61df03e Iustin Pop
      self.op.name = fname
5016 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
5017 d61df03e Iustin Pop
    else:
5018 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
5019 d61df03e Iustin Pop
                                 self.op.mode)
5020 d61df03e Iustin Pop
5021 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
5022 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
5023 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing allocator name")
5024 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
5025 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
5026 d61df03e Iustin Pop
                                 self.op.direction)
5027 d61df03e Iustin Pop
5028 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
5029 d61df03e Iustin Pop
    """Run the allocator test.
5030 d61df03e Iustin Pop

5031 d61df03e Iustin Pop
    """
5032 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5033 29859cb7 Iustin Pop
      ial = IAllocator(self.cfg, self.sstore,
5034 29859cb7 Iustin Pop
                       mode=self.op.mode,
5035 29859cb7 Iustin Pop
                       name=self.op.name,
5036 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
5037 29859cb7 Iustin Pop
                       disks=self.op.disks,
5038 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
5039 29859cb7 Iustin Pop
                       os=self.op.os,
5040 29859cb7 Iustin Pop
                       tags=self.op.tags,
5041 29859cb7 Iustin Pop
                       nics=self.op.nics,
5042 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
5043 29859cb7 Iustin Pop
                       )
5044 29859cb7 Iustin Pop
    else:
5045 29859cb7 Iustin Pop
      ial = IAllocator(self.cfg, self.sstore,
5046 29859cb7 Iustin Pop
                       mode=self.op.mode,
5047 29859cb7 Iustin Pop
                       name=self.op.name,
5048 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
5049 29859cb7 Iustin Pop
                       )
5050 d61df03e Iustin Pop
5051 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
5052 d1c2dd75 Iustin Pop
      result = ial.in_text
5053 298fe380 Iustin Pop
    else:
5054 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
5055 d1c2dd75 Iustin Pop
      result = ial.out_text
5056 298fe380 Iustin Pop
    return result
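# A usage sketch for the allocator test, assuming the matching
# opcodes.OpTestAllocator definition; the attribute names follow the checks
# in LUTestAllocator.CheckPrereq and all values are made up:
#
#   op = opcodes.OpTestAllocator(direction=constants.IALLOCATOR_DIR_IN,
#                                mode=constants.IALLOCATOR_MODE_RELOC,
#                                name="instance1.example.com")
#
# With IALLOCATOR_DIR_IN the LU only builds and returns the input text;
# with IALLOCATOR_DIR_OUT it additionally needs an "allocator" attribute
# and returns the raw, unvalidated output of that script.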