Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ f22a8ba3

History | View | Annotate | Download (181.3 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 6048c986 Guido Trotter
from ganeti import locking
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 8d14b30d Iustin Pop
from ganeti import serializer
45 d61df03e Iustin Pop
46 d61df03e Iustin Pop
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_WSSTORE: the LU needs a writable SimpleStore
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  # Hooks path and type; LUs that run hooks must override these
  HPATH = None
  HTYPE = None
  # Opcode attributes that must be present (non-None) on self.op
  _OP_REQP = []
  REQ_MASTER = True
  REQ_WSSTORE = False
  REQ_BGL = True

  def __init__(self, processor, op, context, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    Raises errors.OpPrereqError if a required opcode parameter is missing,
    the cluster is not initialized, or (when REQ_MASTER is set) we are not
    running on the master node.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.sstore = sstore
    self.context = context
    # needed_locks stays None until ExpandNames populates it
    self.needed_locks = None
    self.acquired_locks = {}
    # Default: exclusive (non-shared) locks at every level
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # Lazily-created SshRunner, exposed via the 'ssh' property below
    self.__ssh = None

    # Validate that all required opcode parameters were supplied
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object, creating it on first use.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.sstore)
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    Raises errors.OpPrereqError if the instance name cannot be expanded.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)
    self.needed_locks[locking.LEVEL_NODE] = wanted_nodes

    # Consume the recalculation request so a second call without a new
    # request trips the assert above
    del self.recalculate_locks[locking.LEVEL_NODE]
305 a8083063 Iustin Pop
306 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  # A None HPATH means BuildHooksEnv is never called for these LUs
  HPATH = None
  HTYPE = None
315 a8083063 Iustin Pop
316 a8083063 Iustin Pop
317 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: non-empty list of short or full node names (strings)

  Raises errors.OpPrereqError if the argument is not a list or a name
  cannot be expanded, and errors.ProgrammerError on an empty list.

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  expanded = []
  for short_name in nodes:
    full_name = lu.cfg.ExpandNodeName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % short_name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
339 3312b702 Iustin Pop
340 3312b702 Iustin Pop
341 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: list of instance names (strings), or a false value
      (None/empty list) for all instances

  Raises errors.OpPrereqError if the argument is not a list or a name
  cannot be expanded.

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    # No explicit selection: operate on every known instance
    wanted = lu.cfg.GetInstanceList()
  else:
    wanted = []
    for short_name in instances:
      full_name = lu.cfg.ExpandInstanceName(short_name)
      if full_name is None:
        raise errors.OpPrereqError("No such instance name '%s'" % short_name)
      wanted.append(full_name)

  return utils.NiceSort(wanted)
363 dcb93971 Michael Hanselmann
364 dcb93971 Michael Hanselmann
365 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
366 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
367 83120a01 Michael Hanselmann

368 83120a01 Michael Hanselmann
  Args:
369 83120a01 Michael Hanselmann
    static: Static fields
370 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
371 83120a01 Michael Hanselmann

372 83120a01 Michael Hanselmann
  """
373 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
374 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
375 dcb93971 Michael Hanselmann
376 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
377 dcb93971 Michael Hanselmann
378 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
379 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
380 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
381 3ecf6786 Iustin Pop
                                          difference(all_fields)))
382 dcb93971 Michael Hanselmann
383 dcb93971 Michael Hanselmann
384 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
385 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
386 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
387 ecb215b5 Michael Hanselmann

388 ecb215b5 Michael Hanselmann
  Args:
389 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
390 396e1b78 Michael Hanselmann
  """
391 396e1b78 Michael Hanselmann
  env = {
392 0e137c28 Iustin Pop
    "OP_TARGET": name,
393 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
394 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
395 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
396 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
397 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
398 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
399 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
400 396e1b78 Michael Hanselmann
  }
401 396e1b78 Michael Hanselmann
402 396e1b78 Michael Hanselmann
  if nics:
403 396e1b78 Michael Hanselmann
    nic_count = len(nics)
404 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
405 396e1b78 Michael Hanselmann
      if ip is None:
406 396e1b78 Michael Hanselmann
        ip = ""
407 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
408 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
409 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
410 396e1b78 Michael Hanselmann
  else:
411 396e1b78 Michael Hanselmann
    nic_count = 0
412 396e1b78 Michael Hanselmann
413 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
414 396e1b78 Michael Hanselmann
415 396e1b78 Michael Hanselmann
  return env
416 396e1b78 Michael Hanselmann
417 396e1b78 Michael Hanselmann
418 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns the env dict produced by _BuildInstanceHookEnv.

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # Bug fix: this used to read instance.os (a copy/paste of the line
    # above), exporting the OS name as INSTANCE_STATUS instead of the
    # actual instance status.
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
438 396e1b78 Michael Hanselmann
439 396e1b78 Michael Hanselmann
440 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  Queries the instance's primary node via RPC; raises
  errors.OpPrereqError if any NIC bridge is missing there.

  """
  # check bridges existence on the primary node
  brlist = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))
450 bf6929a2 Alexander Schreiber
451 bf6929a2 Alexander Schreiber
452 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty: the master must be the
    only remaining node and no instances may be defined.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Stops the master role and backs up the Ganeti ssh keys, then
    returns the master node name (presumably so the caller can finish
    tearing down that node — TODO confirm against the processor code).

    """
    master = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    # Keep copies of the cluster ssh keys before they become unusable
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master
488 a8083063 Iustin Pop
489 a8083063 Iustin Pop
490 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  Runs a series of per-node and per-instance checks (protocol version,
  volume group, file checksums, ssh/tcp connectivity, instance placement,
  orphan volumes and instances, and optionally N+1 memory redundancy),
  reporting problems through the feedback function.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  # the opcode must carry the list of optional checks to skip
  _OP_REQP = ["skip_checks"]

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size (constants.MIN_VG_SIZE)
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    Returns:
      True if the node failed any check (or was unreachable), False if
      it is healthy.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # no version data at all means the RPC to the node failed
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and minimum size

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      # a non-empty 'nodelist' maps peer name -> ssh error message
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      # a non-empty 'node-net-test' maps peer name -> tcp error message
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    # hypervisor verify returns an error string, or None when healthy;
    # note this does not set 'bad' (presumably informational only —
    # NOTE(review): confirm whether this omission is intentional)
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node, that a non-down instance is
    actually running on its primary node, and that it does not run
    anywhere else.

    Returns:
      True if any problem was found, False otherwise.

    """
    bad = False

    node_current = instanceconfig.primary_node

    # node -> list of LVs the instance needs on that node
    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    # any instance not explicitly 'down' is expected to be running
    if not instanceconfig.status == 'down':
      if (node_current not in node_instance or
          not instance in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    # the instance must not run on any node but its primary
    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if orphan volumes were found, False otherwise.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if unknown instances were found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    Returns:
      True if some node cannot absorb its peers' instances, False otherwise.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          needed_mem += instance_cfg[instance].memory
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns:
      True if the cluster verified cleanly, False if problems were found.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a string result means LVM itself reported an error on the node
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]  # NOTE: shadows the outer 'nodeinfo' list
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      # NOTE(review): this return is inside the POST branch; for any other
      # phase the method falls through and returns None — confirm that is
      # intentional before relying on the return value for PRE hooks.
      return lu_result
904 d8fff41c Guido Trotter
905 a8083063 Iustin Pop
906 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns a 4-tuple:
      - list of nodes that could not be contacted (or returned bad data)
      - dict of node name -> LVM error string, for nodes with broken LVM
      - list of instance names having at least one offline logical volume
      - dict of instance name -> list of missing (node, volume) pairs

    """
    # res_* are aliases into 'result' so they can be filled in below
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # build the map of volumes that should exist: only up,
    # network-mirrored instances are considered
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        # LVM itself is broken on this node: record the error string and
        # skip the volume scan; a string has no iteritems() method, so
        # falling through to the loop below would raise AttributeError
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
976 2c95a8d4 Iustin Pop
977 2c95a8d4 Iustin Pop
978 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  # the opcode must carry the new cluster name
  _OP_REQP = ["name"]
  REQ_WSSTORE = True

  def BuildHooksEnv(self):
    """Build hooks env.

    Exports the current cluster name and the requested new name; hooks
    run on the master node only.

    """
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves the new name, requires that either the name or the IP
    actually changes, and refuses an already-reachable new IP.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # a live host answering on the new IP would conflict with the
      # cluster's future master IP
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    # store the canonical (resolved) name back into the opcode
    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    Stops the master role, rewrites the name/IP keys in the ssconf store,
    distributes them to the other nodes (best-effort, failures are only
    logged) and finally restarts the master role.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to bring the master role back, even on failure above
      if not rpc.call_node_start_master(master, False):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1055 07bd8a51 Iustin Pop
1056 07bd8a51 Iustin Pop
1057 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Recursively check whether a disk or any of its children is lvm-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  # descend into the children first; any LVM-based child makes the
  # whole device count as LVM-based
  for child in (disk.children or []):
    if _RecursiveCheckIfLVMBased(child):
      return True
  return disk.dev_type == constants.LD_LV
1072 8084f9f6 Manuel Franceschini
1073 8084f9f6 Manuel Franceschini
1074 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    The master node is the only node in both the pre- and the
    post-hook node lists.

    """
    master_node = self.sstore.GetMasterNode()
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    return env, [master_node], [master_node]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if not self.op.vg_name:
      # lvm storage is being disabled: refuse if any instance still
      # has an lvm-based disk
      for iname in self.cfg.GetInstanceList():
        instance = self.cfg.GetInstanceInfo(iname)
        for disk in instance.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")
    else:
      # a volume group was given: verify its presence and size on all nodes
      node_list = self.cfg.GetNodeList()
      vglist = rpc.call_vg_list(node_list)
      for node in node_list:
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name == self.cfg.GetVGName():
      feedback_fn("Cluster LVM configuration already in desired"
                  " state, not changing")
    else:
      self.cfg.SetVGName(self.op.vg_name)
1129 8084f9f6 Manuel Franceschini
1130 8084f9f6 Manuel Franceschini
1131 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Polls the instance's primary node for mirror status until all disks
  report as synced (or once only, if oneshot is set).  Returns True if
  no disk ended up degraded, False otherwise.

  """
  if not instance.disks:
    return True

  if not oneshot:
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  pnode = instance.primary_node

  # make sure the disks carry node-specific IDs before querying the node
  for disk in instance.disks:
    cfgw.SetDiskID(disk, pnode)

  failures = 0
  while True:
    all_done = True
    degraded = False
    max_time = 0
    rstats = rpc.call_blockdev_getmirrorstatus(pnode, instance.disks)
    if not rstats:
      proc.LogWarning("Can't get any data from node %s" % pnode)
      failures += 1
      if failures >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % pnode)
      time.sleep(6)
      continue
    failures = 0
    for idx, mstat in enumerate(rstats):
      if mstat is None:
        proc.LogWarning("Can't compute data for node %s/%s" %
                        (pnode, instance.disks[idx].iv_name))
        continue
      # the ldisk status (last element) is intentionally ignored here
      perc_done, est_time, is_degraded, _ = mstat
      # a device is counted as degraded only once its sync has finished
      # (perc_done is None) while it still reports as degraded
      degraded = degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        all_done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
                     (instance.disks[idx].iv_name, perc_done, rem_time))
    if all_done or oneshot:
      break

    time.sleep(min(60, max_time))

  if all_done:
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not degraded
1187 a8083063 Iustin Pop
1188 a8083063 Iustin Pop
1189 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  cfgw.SetDiskID(dev, node)
  # position of the relevant flag in the blockdev_find result tuple
  if ldisk:
    status_idx = 6
  else:
    status_idx = 5

  healthy = True
  # only devices actually assembled on this node can be queried
  if on_primary or dev.AssembleOnSecondary():
    rstats = rpc.call_blockdev_find(node, dev)
    if not rstats:
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
      healthy = False
    else:
      healthy = not rstats[status_idx]
  # recurse into the children; note they are checked with the default
  # (is_degraded) test, not with ldisk
  for child in (dev.children or []):
    healthy = healthy and _CheckDiskConsistency(cfgw, child, node, on_primary)

  return healthy
1216 a8083063 Iustin Pop
1217 a8083063 Iustin Pop
1218 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
    _CheckOutputFields(static=[],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # we need all nodes, but only in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remap a per-node OS list into a per-OS, per-node dictionary.

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        map: a map with osnames as keys and as value another map, with
             nodes as keys and list of OS objects as values
             e.g. {"debian-etch": {"node1": [<object>,...],
                                   "node2": [<object>,]}
                  }

    """
    remapped = {}
    for node_name, os_list in rlist.iteritems():
      if not os_list:
        continue
      for os_obj in os_list:
        if os_obj.name not in remapped:
          # first time we see this OS: pre-create an (initially empty)
          # entry for every known node
          per_node = {}
          for nname in node_list:
            per_node[nname] = []
          remapped[os_obj.name] = per_node
        remapped[os_obj.name][node_name].append(os_obj)
    return remapped

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    os_map = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in os_map.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # valid only if every node returned a non-empty list whose
          # first entry is a valid (truthy) OS object
          val = utils.all([nos and nos[0] for nos in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1302 a8083063 Iustin Pop
1303 a8083063 Iustin Pop
1304 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # FIX: this was an old-style two-expression raise statement
      # ("raise E, (...)"), inconsistent with every other raise in this
      # file and invalid under newer Python versions
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    # store the (possibly expanded) name and the node object for Exec
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    self.context.RemoveNode(node.name)

    rpc.call_node_leave_cluster(node.name)
1371 c8a0948f Michael Hanselmann
1372 a8083063 Iustin Pop
1373 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    # fields whose values can only be obtained by querying the nodes live
    self.dynamic_fields = frozenset([
      "dtotal", "dfree",
      "mtotal", "mnode", "mfree",
      "bootid",
      "ctotal",
      ])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip", "tags"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    # TODO: we could lock nodes only if the user asked for dynamic fields. For
    # that we need atomic ways to get info for a group of nodes from the
    # config, though.
    if not self.op.names:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.names)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # This of course is valid only if we locked the nodes
    self.wanted = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    if self.dynamic_fields.intersection(self.op.output_fields):
      # at least one dynamic field requested: query the nodes directly
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          live_data[name] = {}
    else:
      # FIX: dict.fromkeys(nodenames, {}) made every key share ONE dict
      # object; use a distinct empty dict per node instead
      live_data = dict([(name, {}) for name in nodenames])

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field in self.dynamic_fields:
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1487 a8083063 Iustin Pop
1488 a8083063 Iustin Pop
1489 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False

  def ExpandNames(self):
    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      if node not in volumes or not volumes[node]:
        continue

      # sort a copy of the volume list by device name
      node_vols = list(volumes[node])
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # default to '-' and overwrite if an owning instance is found
            val = '-'
            for inst in ilist:
              node_lvs = lv_by_node[inst]
              if node in node_lvs and vol['name'] in node_lvs[node]:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
1566 dcb93971 Michael Hanselmann
1567 dcb93971 Michael Hanselmann
1568 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  CheckPrereq resolves and validates the new node's addresses;
  Exec pushes ssh keys and config files to it and registers it.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  # NOTE(review): self.op.readd is read in CheckPrereq/Exec but is not
  # listed in _OP_REQP — presumably defaulted by the opcode; verify.
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current node list, post-hooks also on the new node
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; also canonicalizes it (dns_data.name below)
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    # secondary IP defaults to the primary (single-homed node)
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    # membership check depends on whether this is an add or a re-add
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    # make sure the new node's addresses do not clash with any existing node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # on re-add the node may (and will) match itself, but its IP
        # configuration must be unchanged
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # check reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # node object used by Exec once all checks have passed
    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # order matters: it must match the positional arguments of
    # rpc.call_node_add below
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    # for dual-homed nodes, verify the node itself can reach its own
    # secondary address
    if new_node.secondary_ip != new_node.primary_ip:
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # ask the master's node daemon to verify ssh/hostname setup of the
    # new node
    node_verify_list = [self.sstore.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = rpc.call_node_verify(node_verify_list, node_verify_param)
    for verifier in node_verify_list:
      if not result[verifier]:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier]['nodelist']:
        for failed in result[verifier]['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier]['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    # on re-add the node is already in the list; the master copies
    # locally, so it is excluded below
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # distribution failures are logged but not fatal
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    # copy the ssconf files (and the VNC password file for HVM
    # clusters) to the new node only
    to_copy = self.sstore.GetFileList()
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = rpc.call_upload_file([node], fname)
      if not result[node]:
        logger.Error("could not copy file %s to node %s" % (fname, node))

    # finally register the node in the cluster context
    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
1769 a8083063 Iustin Pop
1770 a8083063 Iustin Pop
1771 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False
  REQ_BGL = False

  def ExpandNames(self):
    # reading the static cluster information requires no locks at all
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster_info = {}
    cluster_info["name"] = self.sstore.GetClusterName()
    cluster_info["software_version"] = constants.RELEASE_VERSION
    cluster_info["protocol_version"] = constants.PROTOCOL_VERSION
    cluster_info["config_version"] = constants.CONFIG_VERSION
    cluster_info["os_api_version"] = constants.OS_API_VERSION
    cluster_info["export_version"] = constants.EXPORT_VERSION
    cluster_info["master"] = self.sstore.GetMasterNode()
    cluster_info["architecture"] = (platform.architecture()[0],
                                    platform.machine())
    cluster_info["hypervisor_type"] = self.sstore.GetHypervisorType()

    return cluster_info
1805 a8083063 Iustin Pop
1806 a8083063 Iustin Pop
1807 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # a config dump needs no locks
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites.

    """

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    return self.cfg.DumpConfig()
1829 a8083063 Iustin Pop
1830 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # the node locks are filled in at the LEVEL_NODE stage, once the
    # instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % instance_name

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    assembled, disk_mapping = _AssembleInstanceDisks(self.instance, self.cfg)
    if not assembled:
      raise errors.OpExecError("Cannot activate block devices")

    return disk_mapping
1865 a8083063 Iustin Pop
1866 a8083063 Iustin Pop
1867 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the configuration object, used to set the disks' physical IDs
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    false if the operation failed
    list of (host, instance_visible_name, node_visible_name) if the operation
         succeeded with the mapping from node devices to instance devices
  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      cfg.SetDiskID(node_disk, node)
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
        disks_ok = False
    # 'result' here is the primary-node assemble result from the inner loop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1927 a8083063 Iustin Pop
1928 a8083063 Iustin Pop
1929 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Start the disks of an instance, raising OpExecError on failure.

  On assembly failure the already-assembled disks are shut down again
  before the error is raised.

  """
  all_ok, _ = _AssembleInstanceDisks(instance, cfg,
                                     ignore_secondaries=force)
  if all_ok:
    return
  # roll back whatever was assembled before failing
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1941 fe7b0351 Michael Hanselmann
1942 fe7b0351 Michael Hanselmann
1943 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # the node locks are filled in at the LEVEL_NODE stage, once the
    # instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    _SafeShutdownInstanceDisks(self.instance, self.cfg)
1975 a8083063 Iustin Pop
1976 a8083063 Iustin Pop
1977 155d6c75 Guido Trotter
def _SafeShutdownInstanceDisks(instance, cfg):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  Raises OpExecError if the primary node cannot be contacted or if the
  instance is still running there.

  """
  ins_l = rpc.call_instance_list([instance.primary_node])
  ins_l = ins_l[instance.primary_node]
  # a non-list answer means the RPC failed (idiomatic isinstance check
  # instead of the original "not type(ins_l) is list")
  if not isinstance(ins_l, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(instance, cfg)
1995 a8083063 Iustin Pop
1996 a8083063 Iustin Pop
1997 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, failures on the primary node do not make
  the result False; failures on any other node always do.

  """
  success = True
  primary = instance.primary_node
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(primary):
      cfg.SetDiskID(top_disk, node)
      if rpc.call_blockdev_shutdown(node, top_disk):
        continue
      logger.Error("could not shutdown block device %s on node %s" %
                   (disk.iv_name, node))
      if node != primary or not ignore_primary:
        success = False
  return success
2016 a8083063 Iustin Pop
2017 a8083063 Iustin Pop
2018 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
  """Checks if a node has enough free memory.

  This function check if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raise an OpPrereqError
  exception.

  Args:
    - cfg: a ConfigWriter instance
    - node: the node name
    - reason: string to use in the error message
    - requested: the amount of memory in MiB

  """
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
  # also guard against a result dict missing our node: the original code
  # would raise a raw KeyError below instead of the intended OpPrereqError
  if not nodeinfo or not isinstance(nodeinfo, dict) or node not in nodeinfo:
    raise errors.OpPrereqError("Could not contact node %s for resource"
                               " information" % (node,))

  free_mem = nodeinfo[node].get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))
2047 d4f16fd9 Iustin Pop
2048 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # the node locks are filled in at the LEVEL_NODE stage, once the
    # instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"FORCE": self.op.force}
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.instance = instance
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
                         "starting instance %s" % instance.name,
                         instance.memory)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    extra_args = getattr(self.op, "extra_args", "")

    # the instance is recorded as up in the configuration before the
    # actual startup is attempted (same ordering as the original code)
    self.cfg.MarkInstanceUp(instance.name)

    _StartInstanceDisks(self.cfg, instance, self.op.force)

    if not rpc.call_instance_start(instance.primary_node, instance,
                                   extra_args):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")
2114 a8083063 Iustin Pop
2115 a8083063 Iustin Pop
2116 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  Soft and hard reboots are delegated to the hypervisor on the primary
  node; a full reboot is implemented as shutdown + disk restart + start.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the reboot type and acquire the instance lock."""
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the instance's nodes as needed by the reboot type."""
    if level == locking.LEVEL_NODE:
      # Only a full reboot restarts the disks and can therefore involve
      # the secondary nodes; soft/hard reboots only touch the primary.
      # BUGFIX: this used to be "not constants.INSTANCE_REBOOT_FULL",
      # a constant expression (always False) that unconditionally locked
      # all of the instance's nodes instead of testing the reboot type.
      primary_only = self.op.reboot_type != constants.INSTANCE_REBOOT_FULL
      self._LockInstancesNodes(primary_only=primary_only)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that its
    network bridges exist on the primary node.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # the hypervisor handles soft/hard reboots directly
      if not rpc.call_instance_reboot(node_current, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: stop the instance and its disks, then start anew
      if not rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(node_current, instance, extra_args):
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2195 bf6929a2 Alexander Schreiber
2196 bf6929a2 Alexander Schreiber
2197 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Acquire the instance lock; node locks are computed later."""
    self._ExpandAndLockInstance()
    node_level = locking.LEVEL_NODE
    self.needed_locks[node_level] = []
    self.recalculate_locks[node_level] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock all nodes of the (already locked) instance."""
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    The instance is first marked down in the configuration; a failed
    hypervisor-level shutdown is only logged, and the disks are shut
    down in any case.

    """
    inst = self.instance
    self.cfg.MarkInstanceDown(inst.name)
    if not rpc.call_instance_shutdown(inst.primary_node, inst):
      logger.Error("could not shutdown instance")

    _ShutdownInstanceDisks(inst, self.cfg)
2247 a8083063 Iustin Pop
2248 a8083063 Iustin Pop
2249 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Acquire the instance lock; node locks are computed later."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock all nodes of the (already locked) instance."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and, if an OS change was requested, that the new OS is supported on
    the primary node.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked down, the instance might still be live on its node
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # BUGFIX: this used to interpolate self.op.pnode, but the
        # reinstall opcode carries no 'pnode' field, so the intended
        # error was masked by an AttributeError
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      # persist the OS change before running the create scripts
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks, even if the OS install failed
      _ShutdownInstanceDisks(inst, self.cfg)
2335 fe7b0351 Michael Hanselmann
2336 fe7b0351 Michael Hanselmann
2337 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and is not already taken (by another
    instance or, unless ignore_ip is set, by a live IP address).

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked down, the instance might still be live on its node
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification; HostInfo also resolves the name
    name_info = utils.HostInfo(self.op.new_name)

    # use the canonical (fully-qualified) form of the new name from here on
    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      # make sure nothing is already answering on the new name's IP
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    Renames the instance in the configuration (and its lock), moves the
    file storage directory when applicable, and finally runs the OS
    rename script on the primary node.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      # remember the storage directory before the config rename changes it
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
                                                old_file_storage_dir,
                                                new_file_storage_dir)

      # a falsy result means the RPC itself failed (node unreachable)
      if not result:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      # result[0] is the success flag of the remote rename operation
      if not result[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      # a failing rename script is only logged, not fatal: the config
      # rename already happened and must not be rolled back here
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2440 decd5f45 Iustin Pop
2441 decd5f45 Iustin Pop
2442 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    inst = self.cfg.GetInstanceInfo(full_name)
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Remove the instance.

    Shuts the instance down, removes its disks and finally drops it from
    the configuration and the lock manager; with ignore_failures set,
    shutdown/disk-removal errors only produce warnings.

    """
    inst = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (inst.name, inst.primary_node))

    if not rpc.call_instance_shutdown(inst.primary_node, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (inst.name, inst.primary_node))
      feedback_fn("Warning: can't shutdown instance")

    logger.Info("removing block devices for instance %s" % inst.name)

    if not _RemoveDisks(inst, self.cfg):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % inst.name)

    self.cfg.RemoveInstance(inst.name)
    # drop the now-stale instance lock as well
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
2501 a8083063 Iustin Pop
2502 a8083063 Iustin Pop
2503 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the requested fields and set up (shared) locking."""
    # fields that require a live query against the nodes
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size", "vcpus", "tags",
                               "auto_balance",
                               "network_port", "kernel_path", "initrd_path",
                               "hvm_boot_order", "hvm_acpi", "hvm_pae",
                               "hvm_cdrom_image_path", "hvm_nic_type",
                               "hvm_disk_type", "vnc_bind_address"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # read-only query, so shared locks are enough
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    # TODO: we could lock instances (and nodes) only if the user asked for
    # dynamic fields. For that we need atomic ways to get info for a group of
    # instances from the config, though.
    if not self.op.names:
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        _GetWantedInstances(self, self.op.names)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the nodes of the selected instances."""
    # TODO: locking of nodes could be avoided when not querying them
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # This of course is valid only if we locked the instances
    self.wanted = self.acquired_locks[locking.LEVEL_INSTANCE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list (one entry per instance) of lists of field values, in
    the order given by self.op.output_fields.

    """
    instance_names = self.wanted
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    # nodes for which the live query failed outright
    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      # map of instance name -> live runtime info from the hypervisor
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # the RPC to this node failed; mark it so dependent fields
          # can report an error instead of "not running"
          bad_nodes.append(name)
        # else no instance is alive
    else:
      # no dynamic fields requested: pretend nothing is live
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          # True when the instance is configured to be running
          val = (instance.status != "down")
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin + operational state
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # field[:3] is the disk name ("sda" or "sdb")
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "vcpus":
          val = instance.vcpus
        elif field == "tags":
          val = list(instance.GetTags())
        elif field in ("network_port", "kernel_path", "initrd_path",
                       "hvm_boot_order", "hvm_acpi", "hvm_pae",
                       "hvm_cdrom_image_path", "hvm_nic_type",
                       "hvm_disk_type", "vnc_bind_address"):
          # hypervisor parameters: may be unset on older objects
          val = getattr(instance, field, None)
          if val is not None:
            pass
          elif field in ("hvm_nic_type", "hvm_disk_type",
                         "kernel_path", "initrd_path"):
            val = "default"
          else:
            val = "-"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2659 a8083063 Iustin Pop
2660 a8083063 Iustin Pop
2661 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  Shuts the instance down on its primary node and starts it on its
  (single) secondary node; only valid for network-mirrored disk
  templates (DRBD).

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  # We declare our own locks instead of taking the big ganeti lock
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the node locks needed.

    The node locks are computed later (LOCKS_REPLACE) once the
    instance's nodes are known.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the primary and secondary nodes of the instance.

    """
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    # The instance is guaranteed to exist because ExpandNames locked it
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # Failover only makes sense when the disks are mirrored over the network
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
                         instance.name, instance.memory)

    # check bridge existence (all bridges used by the instance's NICs
    # must be present on the failover target)
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        # degraded disks only abort the failover for running instances,
        # and only when the user did not ask to ignore consistency
        if instance.status == "up" and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      # with ignore_consistency a failed shutdown (e.g. dead node) is
      # only logged; otherwise it aborts the failover
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status == "up":
      feedback_fn("* activating the instance's disks on target node")
      logger.Info("Starting instance %s on node %s" %
                  (instance.name, target_node))

      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                               ignore_secondaries=True)
      if not disks_ok:
        # roll back disk activation before aborting
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      if not rpc.call_instance_start(target_node, instance, None):
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance %s on node %s." %
                                 (instance.name, target_node))
2781 a8083063 Iustin Pop
2782 a8083063 Iustin Pop
2783 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Recursively create a block device tree on the primary node.

  Children are created first (depth-first); the device itself is then
  always created via the node daemon.

  Returns True on full success, False as soon as any device in the
  tree could not be created.

  """
  # Depth-first: a device can only be assembled once its children exist.
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
      return False

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not new_id:
    return False
  # Record the node-assigned physical id, unless one is already known.
  if device.physical_id is None:
    device.physical_id = new_id
  return True
2802 a8083063 Iustin Pop
2803 a8083063 Iustin Pop
2804 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Recursively create a block device tree on a secondary node.

  A device that reports CreateOnSecondary() forces creation for itself
  and its whole subtree; otherwise children are visited with the
  caller's 'force' flag unchanged and this device is skipped.

  Returns True on success (or when nothing had to be created), False
  as soon as any required device fails.

  """
  # A device that must exist on secondaries upgrades 'force' for the
  # entire subtree below it.
  force = device.CreateOnSecondary() or force
  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(cfg, node, instance,
                                      child, force, info):
      return False

  if not force:
    # Nothing to create at this level.
    return True
  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, False, info)
  if not new_id:
    return False
  # Record the node-assigned physical id, unless one is already known.
  if device.physical_id is None:
    device.physical_id = new_id
  return True
2831 a8083063 Iustin Pop
2832 a8083063 Iustin Pop
2833 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2834 923b1523 Iustin Pop
  """Generate a suitable LV name.
2835 923b1523 Iustin Pop

2836 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2837 923b1523 Iustin Pop

2838 923b1523 Iustin Pop
  """
2839 923b1523 Iustin Pop
  results = []
2840 923b1523 Iustin Pop
  for val in exts:
2841 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2842 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2843 923b1523 Iustin Pop
  return results
2844 923b1523 Iustin Pop
2845 923b1523 Iustin Pop
2846 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Build a DRBD8 disk object together with its backing children.

  Allocates a DRBD port from the configuration and creates the two
  backing logical volumes: the data LV of the requested size and a
  fixed 128MB metadata LV, using names[0] and names[1] respectively.

  """
  drbd_port = cfg.AllocatePort()
  vg_name = cfg.GetVGName()
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vg_name, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vg_name, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, drbd_port),
                      children=[data_lv, meta_lv],
                      iv_name=iv_name)
2861 a1f445d3 Iustin Pop
2862 7c0d6283 Michael Hanselmann
2863 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz,
                          file_storage_dir, file_driver):
  """Generate the entire disk layout for a given template type.

  Builds the Disk objects for the standard two-drive (sda/sdb) layout:
  sda of size disk_sz and sdb (swap) of size swap_sz.  Raises
  errors.ProgrammerError when the number of secondary nodes does not
  match the template or the template name is unknown.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  if template_name == constants.DT_DISKLESS:
    # no disks at all
    disks = []
  elif template_name == constants.DT_PLAIN:
    # plain LVM volumes live only on the primary node
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                           logical_id=(vgname, names[0]),
                           iv_name = "sda")
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                           logical_id=(vgname, names[1]),
                           iv_name = "sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == constants.DT_DRBD8:
    # DRBD8 mirrors each disk to exactly one secondary node
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # two LV names (data + meta) per drbd device
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2], "sda")
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4], "sdb")
    disks = [drbd_sda_dev, drbd_sdb_dev]
  elif template_name == constants.DT_FILE:
    # file-backed disks live under file_storage_dir on the primary node
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
                                iv_name="sda", logical_id=(file_driver,
                                "%s/sda" % file_storage_dir))
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
                                iv_name="sdb", logical_id=(file_driver,
                                "%s/sdb" % file_storage_dir))
    disks = [file_sda_dev, file_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
2912 a8083063 Iustin Pop
2913 a8083063 Iustin Pop
2914 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2915 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2916 3ecf6786 Iustin Pop

2917 3ecf6786 Iustin Pop
  """
2918 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2919 a0c3fea1 Michael Hanselmann
2920 a0c3fea1 Michael Hanselmann
2921 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.  For file-based
  instances the per-instance storage directory is created first; then
  each disk is created on all secondary nodes before the primary, so
  that mirrored devices find their peers ready.  Creation stops at the
  first failure (compare with `_RemoveDisks()`, which continues).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  if instance.disk_template == constants.DT_FILE:
    # the storage directory is derived from the first disk's path
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = rpc.call_file_storage_dir_create(instance.primary_node,
                                              file_storage_dir)

    # a false result means the RPC itself failed (node unreachable)
    if not result:
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
      return False

    # result[0] is the per-call success flag from the node daemon
    if not result[0]:
      logger.Error("failed to create directory '%s'" % file_storage_dir)
      return False

  for device in instance.disks:
    logger.Info("creating volume %s for instance %s" %
                (device.iv_name, instance.name))
    #HARDCODE
    # secondaries first, so the primary's mirror can attach to them
    for secondary_node in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
                                        device, False, info):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (device.iv_name, device, secondary_node))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                    instance, device, info):
      logger.Error("failed to create volume %s on primary!" %
                   device.iv_name)
      return False

  return True
2966 a8083063 Iustin Pop
2967 a8083063 Iustin Pop
2968 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all block devices belonging to an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`.  Unlike `_CreateDisks()`, a failure on one device
  does not stop the process: every remaining device is still tried and
  the overall status reflects whether all removals succeeded.

  Args:
    instance: the instance object whose disks should be removed

  Returns:
    True if every device (and, for file-based instances, the storage
    directory) was removed, False otherwise

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_clean = True
  for device in instance.disks:
    # walk every (node, device) pair in the device tree
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      if not rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (device.iv_name, node))
        all_clean = False

  # File-based instances also keep a per-instance storage directory.
  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
                                            file_storage_dir):
      logger.Error("could not remove directory '%s'" % file_storage_dir)
      all_clean = False

  return all_clean
3003 a8083063 Iustin Pop
3004 a8083063 Iustin Pop
3005 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
  """Compute the volume-group space needed by a disk template.

  This is currently hard-coded for the two-drive (disk + swap) layout.
  Templates that do not consume volume-group space map to None.

  Raises errors.ProgrammerError for an unknown template.

  """
  # Required free disk space as a function of disk and swap space;
  # DRBD adds 128MB of metadata per drbd device (two devices -> 256MB).
  requirements = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: disk_size + swap_size,
    constants.DT_DRBD8: disk_size + swap_size + 256,
    constants.DT_FILE: None,
  }

  try:
    return requirements[disk_template]
  except KeyError:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)
3025 e2fe6369 Iustin Pop
3026 e2fe6369 Iustin Pop
3027 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
3028 a8083063 Iustin Pop
  """Create an instance.
3029 a8083063 Iustin Pop

3030 a8083063 Iustin Pop
  """
3031 a8083063 Iustin Pop
  HPATH = "instance-add"
3032 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3033 538475ca Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
3034 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
3035 1862d460 Alexander Schreiber
              "wait_for_sync", "ip_check", "mac"]
3036 a8083063 Iustin Pop
3037 538475ca Iustin Pop
  def _RunAllocator(self):
3038 538475ca Iustin Pop
    """Run the allocator based on input opcode.
3039 538475ca Iustin Pop

3040 538475ca Iustin Pop
    """
3041 538475ca Iustin Pop
    disks = [{"size": self.op.disk_size, "mode": "w"},
3042 538475ca Iustin Pop
             {"size": self.op.swap_size, "mode": "w"}]
3043 538475ca Iustin Pop
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
3044 538475ca Iustin Pop
             "bridge": self.op.bridge}]
3045 d1c2dd75 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3046 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
3047 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
3048 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
3049 d1c2dd75 Iustin Pop
                     tags=[],
3050 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
3051 d1c2dd75 Iustin Pop
                     vcpus=self.op.vcpus,
3052 d1c2dd75 Iustin Pop
                     mem_size=self.op.mem_size,
3053 d1c2dd75 Iustin Pop
                     disks=disks,
3054 d1c2dd75 Iustin Pop
                     nics=nics,
3055 29859cb7 Iustin Pop
                     )
3056 d1c2dd75 Iustin Pop
3057 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
3058 d1c2dd75 Iustin Pop
3059 d1c2dd75 Iustin Pop
    if not ial.success:
3060 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3061 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3062 d1c2dd75 Iustin Pop
                                                           ial.info))
3063 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3064 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3065 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
3066 27579978 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
3067 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
3068 538475ca Iustin Pop
    logger.ToStdout("Selected nodes for the instance: %s" %
3069 d1c2dd75 Iustin Pop
                    (", ".join(ial.nodes),))
3070 538475ca Iustin Pop
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
3071 d1c2dd75 Iustin Pop
                (self.op.instance_name, self.op.iallocator, ial.nodes))
3072 27579978 Iustin Pop
    if ial.required_nodes == 2:
3073 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
3074 538475ca Iustin Pop
3075 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3076 a8083063 Iustin Pop
    """Build hooks env.
3077 a8083063 Iustin Pop

3078 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3079 a8083063 Iustin Pop

3080 a8083063 Iustin Pop
    """
3081 a8083063 Iustin Pop
    env = {
3082 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3083 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
3084 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
3085 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
3086 a8083063 Iustin Pop
      }
3087 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3088 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
3089 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
3090 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
3091 396e1b78 Michael Hanselmann
3092 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3093 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
3094 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
3095 396e1b78 Michael Hanselmann
      status=self.instance_status,
3096 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
3097 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
3098 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
3099 c7b27e9e Iustin Pop
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
3100 396e1b78 Michael Hanselmann
    ))
3101 a8083063 Iustin Pop
3102 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
3103 a8083063 Iustin Pop
          self.secondaries)
3104 a8083063 Iustin Pop
    return env, nl, nl
3105 a8083063 Iustin Pop
3106 a8083063 Iustin Pop
3107 a8083063 Iustin Pop
  def CheckPrereq(self):
3108 a8083063 Iustin Pop
    """Check prerequisites.
3109 a8083063 Iustin Pop

3110 a8083063 Iustin Pop
    """
3111 538475ca Iustin Pop
    # set optional parameters to none if they don't exist
3112 538475ca Iustin Pop
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
3113 31a853d2 Iustin Pop
                 "iallocator", "hvm_acpi", "hvm_pae", "hvm_cdrom_image_path",
3114 5397e0b7 Alexander Schreiber
                 "hvm_nic_type", "hvm_disk_type", "vnc_bind_address"]:
3115 40ed12dd Guido Trotter
      if not hasattr(self.op, attr):
3116 40ed12dd Guido Trotter
        setattr(self.op, attr, None)
3117 40ed12dd Guido Trotter
3118 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
3119 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
3120 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3121 3ecf6786 Iustin Pop
                                 self.op.mode)
3122 a8083063 Iustin Pop
3123 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3124 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3125 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3126 eedc99de Manuel Franceschini
                                 " instances")
3127 eedc99de Manuel Franceschini
3128 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3129 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
3130 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
3131 a8083063 Iustin Pop
      if src_node is None or src_path is None:
3132 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
3133 3ecf6786 Iustin Pop
                                   " node and path options")
3134 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
3135 a8083063 Iustin Pop
      if src_node_full is None:
3136 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
3137 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
3138 a8083063 Iustin Pop
3139 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
3140 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
3141 a8083063 Iustin Pop
3142 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
3143 a8083063 Iustin Pop
3144 a8083063 Iustin Pop
      if not export_info:
3145 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3146 a8083063 Iustin Pop
3147 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3148 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3149 a8083063 Iustin Pop
3150 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3151 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3152 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3153 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3154 a8083063 Iustin Pop
3155 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
3156 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
3157 3ecf6786 Iustin Pop
                                   " one data disk")
3158 a8083063 Iustin Pop
3159 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
3160 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3161 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
3162 a8083063 Iustin Pop
                                                         'disk0_dump'))
3163 a8083063 Iustin Pop
      self.src_image = diskimage
3164 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
3165 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
3166 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
3167 a8083063 Iustin Pop
3168 901a65c1 Iustin Pop
    #### instance parameters check
3169 901a65c1 Iustin Pop
3170 a8083063 Iustin Pop
    # disk template and mirror node verification
3171 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3172 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
3173 a8083063 Iustin Pop
3174 901a65c1 Iustin Pop
    # instance name verification
3175 901a65c1 Iustin Pop
    hostname1 = utils.HostInfo(self.op.instance_name)
3176 901a65c1 Iustin Pop
3177 901a65c1 Iustin Pop
    self.op.instance_name = instance_name = hostname1.name
3178 901a65c1 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
3179 901a65c1 Iustin Pop
    if instance_name in instance_list:
3180 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3181 901a65c1 Iustin Pop
                                 instance_name)
3182 901a65c1 Iustin Pop
3183 901a65c1 Iustin Pop
    # ip validity checks
3184 901a65c1 Iustin Pop
    ip = getattr(self.op, "ip", None)
3185 901a65c1 Iustin Pop
    if ip is None or ip.lower() == "none":
3186 901a65c1 Iustin Pop
      inst_ip = None
3187 901a65c1 Iustin Pop
    elif ip.lower() == "auto":
3188 901a65c1 Iustin Pop
      inst_ip = hostname1.ip
3189 901a65c1 Iustin Pop
    else:
3190 901a65c1 Iustin Pop
      if not utils.IsValidIP(ip):
3191 901a65c1 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3192 901a65c1 Iustin Pop
                                   " like a valid IP" % ip)
3193 901a65c1 Iustin Pop
      inst_ip = ip
3194 901a65c1 Iustin Pop
    self.inst_ip = self.op.ip = inst_ip
3195 901a65c1 Iustin Pop
3196 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3197 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3198 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3199 901a65c1 Iustin Pop
3200 901a65c1 Iustin Pop
    if self.op.ip_check:
3201 901a65c1 Iustin Pop
      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
3202 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3203 901a65c1 Iustin Pop
                                   (hostname1.ip, instance_name))
3204 901a65c1 Iustin Pop
3205 901a65c1 Iustin Pop
    # MAC address verification
3206 901a65c1 Iustin Pop
    if self.op.mac != "auto":
3207 901a65c1 Iustin Pop
      if not utils.IsValidMac(self.op.mac.lower()):
3208 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3209 901a65c1 Iustin Pop
                                   self.op.mac)
3210 901a65c1 Iustin Pop
3211 901a65c1 Iustin Pop
    # bridge verification
3212 901a65c1 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
3213 901a65c1 Iustin Pop
    if bridge is None:
3214 901a65c1 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
3215 901a65c1 Iustin Pop
    else:
3216 901a65c1 Iustin Pop
      self.op.bridge = bridge
3217 901a65c1 Iustin Pop
3218 901a65c1 Iustin Pop
    # boot order verification
3219 901a65c1 Iustin Pop
    if self.op.hvm_boot_order is not None:
3220 901a65c1 Iustin Pop
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3221 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid boot order specified,"
3222 901a65c1 Iustin Pop
                                   " must be one or more of [acdn]")
3223 901a65c1 Iustin Pop
    # file storage checks
3224 0f1a06e3 Manuel Franceschini
    if (self.op.file_driver and
3225 0f1a06e3 Manuel Franceschini
        not self.op.file_driver in constants.FILE_DRIVER):
3226 0f1a06e3 Manuel Franceschini
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3227 0f1a06e3 Manuel Franceschini
                                 self.op.file_driver)
3228 0f1a06e3 Manuel Franceschini
3229 0f1a06e3 Manuel Franceschini
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3230 b4de68a9 Iustin Pop
      raise errors.OpPrereqError("File storage directory not a relative"
3231 b4de68a9 Iustin Pop
                                 " path")
3232 538475ca Iustin Pop
    #### allocator run
3233 538475ca Iustin Pop
3234 538475ca Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3235 538475ca Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3236 538475ca Iustin Pop
                                 " node must be given")
3237 538475ca Iustin Pop
3238 538475ca Iustin Pop
    if self.op.iallocator is not None:
3239 538475ca Iustin Pop
      self._RunAllocator()
3240 0f1a06e3 Manuel Franceschini
3241 901a65c1 Iustin Pop
    #### node related checks
3242 901a65c1 Iustin Pop
3243 901a65c1 Iustin Pop
    # check primary node
3244 901a65c1 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
3245 901a65c1 Iustin Pop
    if pnode is None:
3246 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
3247 901a65c1 Iustin Pop
                                 self.op.pnode)
3248 901a65c1 Iustin Pop
    self.op.pnode = pnode.name
3249 901a65c1 Iustin Pop
    self.pnode = pnode
3250 901a65c1 Iustin Pop
    self.secondaries = []
3251 901a65c1 Iustin Pop
3252 901a65c1 Iustin Pop
    # mirror node verification
3253 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3254 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
3255 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3256 3ecf6786 Iustin Pop
                                   " a mirror node")
3257 a8083063 Iustin Pop
3258 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
3259 a8083063 Iustin Pop
      if snode_name is None:
3260 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
3261 3ecf6786 Iustin Pop
                                   self.op.snode)
3262 a8083063 Iustin Pop
      elif snode_name == pnode.name:
3263 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3264 3ecf6786 Iustin Pop
                                   " the primary node.")
3265 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
3266 a8083063 Iustin Pop
3267 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3268 e2fe6369 Iustin Pop
                                self.op.disk_size, self.op.swap_size)
3269 ed1ebc60 Guido Trotter
3270 8d75db10 Iustin Pop
    # Check lv size requirements
3271 8d75db10 Iustin Pop
    if req_size is not None:
3272 8d75db10 Iustin Pop
      nodenames = [pnode.name] + self.secondaries
3273 8d75db10 Iustin Pop
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3274 8d75db10 Iustin Pop
      for node in nodenames:
3275 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3276 8d75db10 Iustin Pop
        if not info:
3277 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3278 3e91897b Iustin Pop
                                     " from node '%s'" % node)
3279 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3280 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3281 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3282 8d75db10 Iustin Pop
                                     " node %s" % node)
3283 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
3284 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3285 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3286 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3287 ed1ebc60 Guido Trotter
3288 a8083063 Iustin Pop
    # os verification
3289 00fe9e38 Guido Trotter
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3290 dfa96ded Guido Trotter
    if not os_obj:
3291 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3292 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3293 a8083063 Iustin Pop
3294 3b6d8c9b Iustin Pop
    if self.op.kernel_path == constants.VALUE_NONE:
3295 3b6d8c9b Iustin Pop
      raise errors.OpPrereqError("Can't set instance kernel to none")
3296 3b6d8c9b Iustin Pop
3297 a8083063 Iustin Pop
3298 901a65c1 Iustin Pop
    # bridge check on primary node
3299 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3300 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3301 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3302 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
3303 a8083063 Iustin Pop
3304 49ce1563 Iustin Pop
    # memory check on primary node
3305 49ce1563 Iustin Pop
    if self.op.start:
3306 49ce1563 Iustin Pop
      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
3307 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3308 49ce1563 Iustin Pop
                           self.op.mem_size)
3309 49ce1563 Iustin Pop
3310 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
3311 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
3312 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
3313 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
3314 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
3315 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3316 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
3317 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
3318 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
3319 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
3320 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3321 31a853d2 Iustin Pop
3322 31a853d2 Iustin Pop
    # vnc_bind_address verification
3323 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
3324 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
3325 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
3326 31a853d2 Iustin Pop
                                   " like a valid IP address" %
3327 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
3328 31a853d2 Iustin Pop
3329 5397e0b7 Alexander Schreiber
    # Xen HVM device type checks
3330 5397e0b7 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
3331 5397e0b7 Alexander Schreiber
      if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
3332 5397e0b7 Alexander Schreiber
        raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
3333 5397e0b7 Alexander Schreiber
                                   " hypervisor" % self.op.hvm_nic_type)
3334 5397e0b7 Alexander Schreiber
      if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
3335 5397e0b7 Alexander Schreiber
        raise errors.OpPrereqError("Invalid disk type %s specified for Xen HVM"
3336 5397e0b7 Alexander Schreiber
                                   " hypervisor" % self.op.hvm_disk_type)
3337 5397e0b7 Alexander Schreiber
3338 a8083063 Iustin Pop
    if self.op.start:
3339 a8083063 Iustin Pop
      self.instance_status = 'up'
3340 a8083063 Iustin Pop
    else:
3341 a8083063 Iustin Pop
      self.instance_status = 'down'
3342 a8083063 Iustin Pop
3343 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Allocates the NIC (MAC and optional IP computed in CheckPrereq) and,
    for hypervisors in HTS_REQ_PORT, a cluster network port; generates
    the disk layout, registers the instance in the configuration and the
    Ganeti lock manager, waits for disk sync, runs the OS create/import
    scripts and optionally starts the instance.

    Raises:
      errors.OpExecError: if disk creation fails (disks are rolled
        back), if disks end up degraded (instance is removed again),
        if the OS scripts fail, or if the instance cannot be started.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # either generate a fresh MAC or use the one supplied in the opcode
    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.cfg.GenerateMAC()
    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    # inst_ip was validated/resolved in CheckPrereq; None means no IP
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    # hypervisors listed in HTS_REQ_PORT need a cluster-allocated port
    ht_kind = self.sstore.GetHypervisorType()
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    if self.op.vnc_bind_address is None:
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.sstore.GetFileStorageDir(),
                                        string_file_storage_dir, instance))

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size,
                                  file_storage_dir,
                                  self.op.file_driver)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            kernel_path=self.op.kernel_path,
                            initrd_path=self.op.initrd_path,
                            hvm_boot_order=self.op.hvm_boot_order,
                            hvm_acpi=self.op.hvm_acpi,
                            hvm_pae=self.op.hvm_pae,
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
                            vnc_bind_address=self.op.vnc_bind_address,
                            hvm_nic_type=self.op.hvm_nic_type,
                            hvm_disk_type=self.op.hvm_disk_type,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back whatever disks were created before aborting
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Add the new instance to the Ganeti Lock Manager
    self.context.glm.add(locking.LEVEL_INSTANCE, instance)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # undo both the disk creation and the config/lock registration
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      # Remove the new instance from the Ganeti Lock Manager
      self.context.glm.remove(locking.LEVEL_INSTANCE, iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        # src_node/src_image were resolved during CheckPrereq
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3466 a8083063 Iustin Pop
3467 a8083063 Iustin Pop
3468 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  Unlike most LUs this one does not act remotely; it computes and
  returns the ssh command line that must be run on the master node to
  reach the instance's console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies that the (already locked) instance exists in the
    configuration.

    """
    inst_info = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert inst_info is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = inst_info

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # query the primary node for its running instances
    running = rpc.call_instance_list([pnode])[pnode]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)
    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logger.Debug("connecting to console of %s on %s" % (inst.name, pnode))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(inst)

    # wrap the hypervisor console command in an ssh invocation
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
3513 a8083063 Iustin Pop
3514 a8083063 Iustin Pop
3515 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3516 a8083063 Iustin Pop
  """Replace the disks of an instance.
3517 a8083063 Iustin Pop

3518 a8083063 Iustin Pop
  """
3519 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3520 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3521 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3522 a8083063 Iustin Pop
3523 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    Runs the configured iallocator in relocation mode and stores the
    selected node in self.op.remote_node.

    Raises:
      errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes.

    """
    ial = IAllocator(self.cfg, self.sstore,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # BUGFIX: the format string has three placeholders but the tuple
      # previously supplied only two values, so this error path raised
      # "TypeError: not enough arguments for format string" instead of
      # the intended OpPrereqError; add the allocator name.
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    logger.ToStdout("Selected new secondary for the instance: %s" %
                    self.op.remote_node)
3545 b6e82a65 Iustin Pop
3546 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run on the master, the instance's primary node and, when
    one was requested, the new secondary node.

    """
    # replace-specific variables first; the generic instance variables
    # are merged on top so they take precedence on any key collision
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return env, node_list, node_list
3565 a8083063 Iustin Pop
3566 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    template is network-mirrored, and resolves/validates the target of
    the replacement: either an explicitly given new secondary node or
    one computed via the iallocator.  For DRBD8 it also normalizes the
    replacement mode and selects self.tgt_node/self.oth_node (and
    possibly self.new_node).

    """
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None

    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance
    # store the canonical (expanded) name back into the opcode
    self.op.instance_name = instance.name

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # iallocator and an explicit new secondary are mutually exclusive;
    # the allocator fills in self.op.remote_node itself
    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      if self.op.remote_node is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # replace on primary: target the primary, other is the secondary
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # all requested disk names must exist on the instance
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
    self.op.remote_node = remote_node
3649 a8083063 Iustin Pop
3650 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    # short aliases for the job-processor logging helpers
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps disk iv_name -> (drbd device, old LV children, new LV children)
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node: node whose local storage is replaced (set by CheckPrereq)
    tgt_node = self.tgt_node
    # oth_node: the other node of the DRBD pair, used for consistency checks
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    # only disks listed in the opcode are touched; others are skipped
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      # the peer must be healthy: it becomes the only good copy while
      # the target node's storage is being replaced
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      # one data LV plus one (fixed 128 MiB) metadata LV per disk
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # keep the in-memory config objects in sync with the on-node renames
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        # attach failed: best-effort rollback by removing the new LVs
        for new_lv in new_lvs:
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
            # NOTE(review): the "%s" placeholder is never substituted --
            # no argument is supplied, so the message is logged verbatim
            warning("Can't rollback device %s", hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of the blockdev_find result is the degraded flag --
      # presumably (sync_percent, ..., is_degraded, ...); TODO confirm
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # removal is best-effort: a failure only produces a warning
        if not rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6
    # short aliases for the job-processor logging helpers
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps disk iv_name -> (drbd device, its LV children on the old node)
    iv_names = {}
    # NOTE(review): 'vgname' is never used in this method
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([pri_node, new_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in pri_node, new_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s on %s" % (dev.iv_name, pri_node))
      cfg.SetDiskID(dev, pri_node)
      if not rpc.call_blockdev_find(pri_node, dev):
        raise errors.OpExecError("Can't find device %s on node %s" %
                                 (dev.iv_name, pri_node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
      # the primary becomes the only copy while the secondary is
      # replaced, so its local disks (ldisk=True) must be healthy
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      # NOTE(review): 'size' is assigned but never used in this loop
      size = dev.size
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in dev.children:
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], new_node))

      iv_names[dev.iv_name] = (dev, dev.children)

    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for dev in instance.disks:
      # NOTE(review): 'size' is assigned but never used in this loop
      size = dev.size
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
      # create new devices on new_node
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=(pri_node, new_node,
                                          dev.logical_id[2]),
                              children=dev.children)
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
                                        new_drbd, False,
                                      _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new DRBD on"
                                 " node '%s'" % new_node)

    for dev in instance.disks:
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for %s on old node" % dev.iv_name)
      cfg.SetDiskID(dev, old_node)
      # shutdown failure is non-fatal: the old node's devices are stale
      # anyway and only need manual cleanup
      if not rpc.call_blockdev_shutdown(old_node, dev):
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    done = 0
    for dev in instance.disks:
      cfg.SetDiskID(dev, pri_node)
      # set the physical (unique in bdev terms) id to None, meaning
      # detach from network
      dev.physical_id = (None,) * len(dev.physical_id)
      # and 'find' the device, which will 'fix' it to match the
      # standalone state
      if rpc.call_blockdev_find(pri_node, dev):
        done += 1
      else:
        warning("Failed to detach drbd %s from network, unusual case" %
                dev.iv_name)

    if not done:
      # no detaches succeeded (very unlikely)
      raise errors.OpExecError("Can't detach at least one DRBD from old node")

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev in instance.disks:
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    # NOTE(review): 'failures' is never populated or read
    failures = []
    for dev in instance.disks:
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
      # since the attach is smart, it's enough to 'find' the device,
      # it will automatically activate the network, if the physical_id
      # is correct
      cfg.SetDiskID(dev, pri_node)
      if not rpc.call_blockdev_find(pri_node, dev):
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
                "please do a gnt-instance info to see the status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      # index 5 of the blockdev_find result is the degraded flag --
      # presumably (sync_percent, ..., is_degraded, ...); TODO confirm
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        # removal is best-effort: a failure only produces a warning
        if not rpc.call_blockdev_remove(old_node, lv):
          warning("Can't remove LV on old secondary",
                  hint="Cleanup stale volumes by hand")
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    inst = self.instance

    # a stopped instance has inactive disks; bring them up for the
    # duration of the replacement
    if inst.status == "down":
      _StartInstanceDisks(self.cfg, inst, True)

    if inst.disk_template != constants.DT_DRBD8:
      raise errors.ProgrammerError("Unhandled disk replacement case")

    # no remote node given: replace disks in place; otherwise move the
    # secondary to the requested node
    if self.op.remote_node is None:
      handler = self._ExecD8DiskOnly
    else:
      handler = self._ExecD8Secondary

    result = handler(feedback_fn)

    # put the disks of a stopped instance back into their inactive state
    if inst.status == "down":
      _SafeShutdownInstanceDisks(inst, self.cfg)

    return result
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance itself; the node locks are recomputed once the
    # instance's node list is known (see DeclareLocks)
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # at node level, lock all nodes of the already-locked instance
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {"DISK": self.op.disk, "AMOUNT": self.op.amount}
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    inst = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert inst is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = inst

    # only LV-backed templates can be grown
    if inst.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    if inst.FindDisk(self.op.disk) is None:
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                 (self.op.disk, inst.name))

    # every node holding a copy of the disk must have enough free space
    # in the volume group
    nodenames = [inst.primary_node] + list(inst.secondary_nodes)
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
    for node in nodenames:
      node_data = nodeinfo.get(node, None)
      if not node_data:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = node_data.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > node_data['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, node_data['vg_free'],
                                    self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    inst = self.instance
    disk = inst.FindDisk(self.op.disk)
    # grow the device on every node holding it, secondaries first
    for node in (inst.secondary_nodes + (inst.primary_node,)):
      self.cfg.SetDiskID(disk, node)
      result = rpc.call_blockdev_grow(node, disk, self.op.amount)
      # the RPC is expected to return a (success, payload) pair
      well_formed = (result and isinstance(result, (list, tuple)) and
                     len(result) == 2)
      if not well_formed:
        raise errors.OpExecError("grow request failed to node %s" % node)
      if not result[0]:
        raise errors.OpExecError("grow request failed to node %s: %s" %
                                 (node, result[1]))
    # record the new size in the configuration
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(inst)
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  Builds, for each requested instance, a dictionary with its
  configuration and runtime status (state, disks, nics and
  hypervisor-specific parameters).

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      # an explicit list was given: expand each name, failing on unknown ones
      self.wanted_instances = []
      names = self.op.instances
      for name in names:
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
          raise errors.OpPrereqError("No such instance name '%s'" % name)
        self.wanted_instances.append(instance)
    else:
      # no names given: report on every instance in the configuration
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Queries the primary node (and the secondary, if any) for the given
    device and returns a dict describing it, recursing into child
    devices.  For DRBD devices the secondary to query is taken from the
    device's logical_id (the peer of the primary node) rather than from
    the *snode* argument.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      # no secondary node, so there is no secondary-side status
      dev_sstatus = None

    if dev.children:
      # recurse, passing down the (possibly DRBD-adjusted) secondary node
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data.

    Returns a dict mapping instance name to its description dict.

    """
    result = {}
    for instance in self.wanted_instances:
      # runtime state as reported by the instance's primary node
      remote_info = rpc.call_instance_info(instance.primary_node,
                                                instance.name)
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "vcpus": instance.vcpus,
        }

      # add the hypervisor-specific fields
      htkind = self.sstore.GetHypervisorType()
      if htkind == constants.HT_XEN_PVM30:
        idict["kernel_path"] = instance.kernel_path
        idict["initrd_path"] = instance.initrd_path

      if htkind == constants.HT_XEN_HVM31:
        idict["hvm_boot_order"] = instance.hvm_boot_order
        idict["hvm_acpi"] = instance.hvm_acpi
        idict["hvm_pae"] = instance.hvm_pae
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
        idict["hvm_nic_type"] = instance.hvm_nic_type
        idict["hvm_disk_type"] = instance.hvm_disk_type

      if htkind in constants.HTS_REQ_PORT:
        # compute a human-readable VNC console endpoint
        if instance.vnc_bind_address is None:
          vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
        else:
          vnc_bind_address = instance.vnc_bind_address
        if instance.network_port is None:
          vnc_console_port = None
        elif vnc_bind_address == constants.BIND_ADDRESS_GLOBAL:
          # bound on all interfaces: reachable through the primary node name
          vnc_console_port = "%s:%s" % (instance.primary_node,
                                       instance.network_port)
        elif vnc_bind_address == constants.LOCALHOST_IP_ADDRESS:
          # loopback bind: only reachable locally on the primary node
          vnc_console_port = "%s:%s on node %s" % (vnc_bind_address,
                                                   instance.network_port,
                                                   instance.primary_node)
        else:
          vnc_console_port = "%s:%s" % (instance.vnc_bind_address,
                                        instance.network_port)
        idict["vnc_console_port"] = vnc_console_port
        idict["vnc_bind_address"] = vnc_bind_address
        idict["network_port"] = instance.network_port

      result[instance.name] = idict

    return result
4236 a8083063 Iustin Pop
4237 a8083063 Iustin Pop
4238 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    # only pass to the hooks the parameters that are actually changing;
    # for nics, fill in unchanged fields from the current first nic
    args = dict()
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    if self.do_ip or self.do_bridge or self.mac:
      if self.do_ip:
        ip = self.ip
      else:
        ip = self.instance.nics[0].ip
      if self.bridge:
        bridge = self.bridge
      else:
        bridge = self.instance.nics[0].bridge
      if self.mac:
        mac = self.mac
      else:
        mac = self.instance.nics[0].mac
      args['nics'] = [(ip, bridge, mac)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
    # a separate CheckArguments function, if we implement one, so the operation
    # can be aborted without waiting for any lock, should it have an error...
    #
    # read every optional parameter off the opcode (None means "not changed")
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.mac = getattr(self.op, "mac", None)
    self.bridge = getattr(self.op, "bridge", None)
    self.kernel_path = getattr(self.op, "kernel_path", None)
    self.initrd_path = getattr(self.op, "initrd_path", None)
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
    self.hvm_nic_type = getattr(self.op, "hvm_nic_type", None)
    self.hvm_disk_type = getattr(self.op, "hvm_disk_type", None)
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
    self.force = getattr(self.op, "force", None)
    # at least one parameter must have been submitted
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
                 self.vnc_bind_address, self.hvm_nic_type, self.hvm_disk_type]
    if all_parms.count(None) == len(all_parms):
      raise errors.OpPrereqError("No changes submitted")
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      # the literal string "none" clears the instance IP
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)
    if self.mac is not None:
      if self.cfg.IsMacInUse(self.mac):
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
                                   self.mac)
      if not utils.IsValidMac(self.mac):
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)

    if self.kernel_path is not None:
      self.do_kernel_path = True
      if self.kernel_path == constants.VALUE_NONE:
        raise errors.OpPrereqError("Can't set instance to no kernel")

      if self.kernel_path != constants.VALUE_DEFAULT:
        if not os.path.isabs(self.kernel_path):
          raise errors.OpPrereqError("The kernel path must be an absolute"
                                    " filename")
    else:
      self.do_kernel_path = False

    if self.initrd_path is not None:
      self.do_initrd_path = True
      # unlike the kernel, the initrd may be removed (VALUE_NONE allowed)
      if self.initrd_path not in (constants.VALUE_NONE,
                                  constants.VALUE_DEFAULT):
        if not os.path.isabs(self.initrd_path):
          raise errors.OpPrereqError("The initrd path must be an absolute"
                                    " filename")
    else:
      self.do_initrd_path = False

    # boot order verification
    if self.hvm_boot_order is not None:
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
        if len(self.hvm_boot_order.strip("acdn")) != 0:
          raise errors.OpPrereqError("invalid boot order specified,"
                                     " must be one or more of [acdn]"
                                     " or 'default'")

    # hvm_cdrom_image_path verification
    if self.op.hvm_cdrom_image_path is not None:
      if not (os.path.isabs(self.op.hvm_cdrom_image_path) or
              self.op.hvm_cdrom_image_path.lower() == "none"):
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
                                   " be an absolute path or None, not %s" %
                                   self.op.hvm_cdrom_image_path)
      # NOTE(review): this isfile() check runs on the master node; it
      # presumably assumes the image path is valid there -- confirm
      if not (os.path.isfile(self.op.hvm_cdrom_image_path) or
              self.op.hvm_cdrom_image_path.lower() == "none"):
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
                                   " regular file or a symlink pointing to"
                                   " an existing regular file, not %s" %
                                   self.op.hvm_cdrom_image_path)

    # vnc_bind_address verification
    if self.op.vnc_bind_address is not None:
      if not utils.IsValidIP(self.op.vnc_bind_address):
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
                                   " like a valid IP address" %
                                   self.op.vnc_bind_address)

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    # non-fatal issues are collected here and reported by Exec()
    self.warn = []
    if self.mem is not None and not self.force:
      # check there is enough free memory on the primary (fatal) and on
      # the secondaries (warning only), unless --force was given
      pnode = self.instance.primary_node
      nodelist = [pnode]
      nodelist.extend(instance.secondary_nodes)
      instance_info = rpc.call_instance_info(pnode, instance.name)
      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s" % pnode)
      else:
        if instance_info:
          current_mem = instance_info['memory']
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = self.mem - current_mem - nodeinfo[pnode]['memory_free']
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      for node in instance.secondary_nodes:
        if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
          self.warn.append("Can't get info from secondary node %s" % node)
        elif self.mem > nodeinfo[node]['memory_free']:
          self.warn.append("Not enough memory to failover instance to secondary"
                           " node %s" % node)

    # Xen HVM device type checks
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      if self.op.hvm_nic_type is not None:
        if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
          raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
                                     " HVM  hypervisor" % self.op.hvm_nic_type)
      if self.op.hvm_disk_type is not None:
        if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
          raise errors.OpPrereqError("Invalid disk type %s specified for Xen"
                                     " HVM hypervisor" % self.op.hvm_disk_type)

    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    Returns a list of (parameter, new value) pairs for the parameters
    that were actually changed.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus",  self.vcpus))
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))
    if self.mac:
      instance.nics[0].mac = self.mac
      result.append(("mac", self.mac))
    if self.do_kernel_path:
      instance.kernel_path = self.kernel_path
      result.append(("kernel_path", self.kernel_path))
    if self.do_initrd_path:
      instance.initrd_path = self.initrd_path
      result.append(("initrd_path", self.initrd_path))
    if self.hvm_boot_order:
      # VALUE_DEFAULT is stored as None (use the hypervisor default)
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
        instance.hvm_boot_order = None
      else:
        instance.hvm_boot_order = self.hvm_boot_order
      result.append(("hvm_boot_order", self.hvm_boot_order))
    if self.hvm_acpi is not None:
      instance.hvm_acpi = self.hvm_acpi
      result.append(("hvm_acpi", self.hvm_acpi))
    if self.hvm_pae is not None:
      instance.hvm_pae = self.hvm_pae
      result.append(("hvm_pae", self.hvm_pae))
    if self.hvm_nic_type is not None:
      instance.hvm_nic_type = self.hvm_nic_type
      result.append(("hvm_nic_type", self.hvm_nic_type))
    if self.hvm_disk_type is not None:
      instance.hvm_disk_type = self.hvm_disk_type
      result.append(("hvm_disk_type", self.hvm_disk_type))
    if self.hvm_cdrom_image_path:
      # VALUE_NONE removes the CDROM image
      if self.hvm_cdrom_image_path == constants.VALUE_NONE:
        instance.hvm_cdrom_image_path = None
      else:
        instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
      result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
    if self.vnc_bind_address:
      instance.vnc_bind_address = self.vnc_bind_address
      result.append(("vnc_bind_address", self.vnc_bind_address))

    self.cfg.Update(instance)

    return result
4500 a8083063 Iustin Pop
4501 a8083063 Iustin Pop
4502 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    # read-only query: share the node locks instead of taking them
    # exclusively
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      wanted = _GetWantedNodes(self, self.op.nodes)
    else:
      # an empty node list means "ask every node in the cluster"
      wanted = locking.ALL_SET
    self.needed_locks[locking.LEVEL_NODE] = wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the node list is whatever the locking layer actually acquired
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return rpc.call_export_list(self.nodes)
4534 a8083063 Iustin Pop
4535 a8083063 Iustin Pop
4536 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  The export is created by snapshotting the instance's disk, copying the
  snapshot to the target node, and finalizing the export there; older
  exports of the same instance on other nodes are then removed.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know where
    # the previous export might be, and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # Hooks run on master, instance primary and export target node.
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    # Both objects were locked in ExpandNames, so lookups must succeed;
    # a failure here indicates a locking/config inconsistency.
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    assert self.dst_node is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

    # instance disk type verification: file-based disks cannot be
    # snapshotted via LVM, so exporting them is not supported
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      if not rpc.call_instance_shutdown(src_node, instance):
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    # snap_disks collects the snapshot Disk objects created below;
    # note that only the disk named "sda" is snapshotted/exported.
    snap_disks = []

    try:
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            # snapshot failure is logged but not fatal; the export will
            # simply be finalized without this disk
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            # wrap the snapshot LV in a Disk object so it can be exported
            # and removed via the standard block-device RPCs
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance only if we shut it down ourselves and the
      # configuration says it should be running
      if self.op.shutdown and instance.status == "up":
        if not rpc.call_instance_start(src_node, instance, None):
          _ShutdownInstanceDisks(instance, self.cfg)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    # copy each snapshot to the target node, then drop the snapshot on
    # the source node regardless of whether the copy succeeded
    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
        logger.Error("could not export block device %s from node %s to node %s"
                     % (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from node %s" %
                     (dev.logical_id[1], src_node))

    # write the export metadata on the target node
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    # remove stale exports of this instance from every other node
    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = rpc.call_export_list(nodelist)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4665 5c947f38 Iustin Pop
4666 5c947f38 Iustin Pop
4667 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove all exports belonging to the named instance.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    # Try to resolve the given name against the known instances; if that
    # fails, fall back to the raw name, which lets us clean up exports of
    # already-deleted instances (but only when an FQDN was supplied).
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    fqdn_warn = not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    found = False
    exportlist = rpc.call_export_list(self.cfg.GetNodeList())
    for node in exportlist:
      node_exports = exportlist[node]
      if instance_name not in node_exports:
        continue
      found = True
      if not rpc.call_export_remove(node, instance_name):
        logger.Error("could not remove export for instance %s"
                     " on node %s" % (instance_name, node))

    # Warn the user when the name didn't resolve and nothing was found:
    # a short (non-FQDN) name cannot match exports of deleted instances.
    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
4703 9ac99fda Guido Trotter
4704 9ac99fda Guido Trotter
4705 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    Resolves self.op.name according to self.op.kind and stores the
    resulting configuration object in self.target.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      # store the canonical name back into the opcode
      self.op.name = expanded
      self.target = self.cfg.GetNodeInfo(expanded)
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      # store the canonical name back into the opcode
      self.op.name = expanded
      self.target = self.cfg.GetInstanceInfo(expanded)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4734 5c947f38 Iustin Pop
4735 5c947f38 Iustin Pop
4736 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # materialize the target's tags into a plain list for the caller
    tags = self.target.GetTags()
    return list(tags)
4747 5c947f38 Iustin Pop
4748 5c947f38 Iustin Pop
4749 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4750 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4751 73415719 Iustin Pop

4752 73415719 Iustin Pop
  """
4753 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4754 73415719 Iustin Pop
4755 73415719 Iustin Pop
  def CheckPrereq(self):
4756 73415719 Iustin Pop
    """Check prerequisites.
4757 73415719 Iustin Pop

4758 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4759 73415719 Iustin Pop

4760 73415719 Iustin Pop
    """
4761 73415719 Iustin Pop
    try:
4762 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4763 73415719 Iustin Pop
    except re.error, err:
4764 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4765 73415719 Iustin Pop
                                 (self.op.pattern, err))
4766 73415719 Iustin Pop
4767 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4768 73415719 Iustin Pop
    """Returns the tag list.
4769 73415719 Iustin Pop

4770 73415719 Iustin Pop
    """
4771 73415719 Iustin Pop
    cfg = self.cfg
4772 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4773 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4774 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4775 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4776 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4777 73415719 Iustin Pop
    results = []
4778 73415719 Iustin Pop
    for path, target in tgts:
4779 73415719 Iustin Pop
      for tag in target.GetTags():
4780 73415719 Iustin Pop
        if self.re.search(tag):
4781 73415719 Iustin Pop
          results.append((path, tag))
4782 73415719 Iustin Pop
    return results
4783 73415719 Iustin Pop
4784 73415719 Iustin Pop
4785 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4786 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4787 5c947f38 Iustin Pop

4788 5c947f38 Iustin Pop
  """
4789 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4790 5c947f38 Iustin Pop
4791 5c947f38 Iustin Pop
  def CheckPrereq(self):
4792 5c947f38 Iustin Pop
    """Check prerequisites.
4793 5c947f38 Iustin Pop

4794 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4795 5c947f38 Iustin Pop

4796 5c947f38 Iustin Pop
    """
4797 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4798 f27302fa Iustin Pop
    for tag in self.op.tags:
4799 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4800 5c947f38 Iustin Pop
4801 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4802 5c947f38 Iustin Pop
    """Sets the tag.
4803 5c947f38 Iustin Pop

4804 5c947f38 Iustin Pop
    """
4805 5c947f38 Iustin Pop
    try:
4806 f27302fa Iustin Pop
      for tag in self.op.tags:
4807 f27302fa Iustin Pop
        self.target.AddTag(tag)
4808 5c947f38 Iustin Pop
    except errors.TagError, err:
4809 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4810 5c947f38 Iustin Pop
    try:
4811 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4812 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4813 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4814 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4815 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4816 5c947f38 Iustin Pop
4817 5c947f38 Iustin Pop
4818 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    # every tag requested for deletion must currently be present
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    missing = del_tags - cur_tags
    if missing:
      diff_names = sorted("'%s'" % tag for tag in missing)
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    # drop the tags from the in-memory object, then persist the change
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
4854 06009e27 Iustin Pop
4855 0eed6e61 Guido Trotter
4856 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    # sleep locally on the master if requested
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    # then fan the delay out to the requested nodes via RPC
    if self.op.on_nodes:
      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, nres in result.items():
        if not nres:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, nres))
4900 d61df03e Iustin Pop
4901 d61df03e Iustin Pop
4902 d1c2dd75 Iustin Pop
class IAllocator(object):
4903 d1c2dd75 Iustin Pop
  """IAllocator framework.
4904 d61df03e Iustin Pop

4905 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
4906 d1c2dd75 Iustin Pop
    - cfg/sstore that are needed to query the cluster
4907 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
4908 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
4909 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
4910 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
4911 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
4912 d1c2dd75 Iustin Pop
      easy usage
4913 d61df03e Iustin Pop

4914 d61df03e Iustin Pop
  """
4915 29859cb7 Iustin Pop
  _ALLO_KEYS = [
4916 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
4917 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
4918 d1c2dd75 Iustin Pop
    ]
4919 29859cb7 Iustin Pop
  _RELO_KEYS = [
4920 29859cb7 Iustin Pop
    "relocate_from",
4921 29859cb7 Iustin Pop
    ]
4922 d1c2dd75 Iustin Pop
4923 29859cb7 Iustin Pop
  def __init__(self, cfg, sstore, mode, name, **kwargs):
4924 d1c2dd75 Iustin Pop
    self.cfg = cfg
4925 d1c2dd75 Iustin Pop
    self.sstore = sstore
4926 d1c2dd75 Iustin Pop
    # init buffer variables
4927 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
4928 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
4929 29859cb7 Iustin Pop
    self.mode = mode
4930 29859cb7 Iustin Pop
    self.name = name
4931 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
4932 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
4933 29859cb7 Iustin Pop
    self.relocate_from = None
4934 27579978 Iustin Pop
    # computed fields
4935 27579978 Iustin Pop
    self.required_nodes = None
4936 d1c2dd75 Iustin Pop
    # init result fields
4937 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
4938 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
4939 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
4940 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
4941 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
4942 29859cb7 Iustin Pop
    else:
4943 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
4944 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
4945 d1c2dd75 Iustin Pop
    for key in kwargs:
4946 29859cb7 Iustin Pop
      if key not in keyset:
4947 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
4948 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4949 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
4950 29859cb7 Iustin Pop
    for key in keyset:
4951 d1c2dd75 Iustin Pop
      if key not in kwargs:
4952 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
4953 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4954 d1c2dd75 Iustin Pop
    self._BuildInputData()
4955 d1c2dd75 Iustin Pop
4956 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
4957 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
4958 d1c2dd75 Iustin Pop

4959 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
4960 d1c2dd75 Iustin Pop

4961 d1c2dd75 Iustin Pop
    """
4962 d1c2dd75 Iustin Pop
    cfg = self.cfg
4963 d1c2dd75 Iustin Pop
    # cluster data
4964 d1c2dd75 Iustin Pop
    data = {
4965 d1c2dd75 Iustin Pop
      "version": 1,
4966 d1c2dd75 Iustin Pop
      "cluster_name": self.sstore.GetClusterName(),
4967 d1c2dd75 Iustin Pop
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
4968 6286519f Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
4969 d1c2dd75 Iustin Pop
      # we don't have job IDs
4970 d61df03e Iustin Pop
      }
4971 d61df03e Iustin Pop
4972 6286519f Iustin Pop
    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]
4973 6286519f Iustin Pop
4974 d1c2dd75 Iustin Pop
    # node data
4975 d1c2dd75 Iustin Pop
    node_results = {}
4976 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
4977 d1c2dd75 Iustin Pop
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
4978 d1c2dd75 Iustin Pop
    for nname in node_list:
4979 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
4980 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
4981 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
4982 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
4983 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
4984 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
4985 d1c2dd75 Iustin Pop
        if attr not in remote_info:
4986 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
4987 d1c2dd75 Iustin Pop
                                   (nname, attr))
4988 d1c2dd75 Iustin Pop
        try:
4989 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
4990 d1c2dd75 Iustin Pop
        except ValueError, err:
4991 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
4992 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
4993 6286519f Iustin Pop
      # compute memory used by primary instances
4994 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
4995 6286519f Iustin Pop
      for iinfo in i_list:
4996 6286519f Iustin Pop
        if iinfo.primary_node == nname:
4997 6286519f Iustin Pop
          i_p_mem += iinfo.memory
4998 6286519f Iustin Pop
          if iinfo.status == "up":
4999 6286519f Iustin Pop
            i_p_up_mem += iinfo.memory
5000 6286519f Iustin Pop
5001 b2662e7f Iustin Pop
      # compute memory used by instances
5002 d1c2dd75 Iustin Pop
      pnr = {
5003 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
5004 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
5005 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
5006 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
5007 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
5008 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
5009 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
5010 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
5011 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
5012 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
5013 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
5014 d1c2dd75 Iustin Pop
        }
5015 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
5016 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
5017 d1c2dd75 Iustin Pop
5018 d1c2dd75 Iustin Pop
    # instance data
5019 d1c2dd75 Iustin Pop
    instance_data = {}
5020 6286519f Iustin Pop
    for iinfo in i_list:
5021 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
5022 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
5023 d1c2dd75 Iustin Pop
      pir = {
5024 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
5025 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
5026 d1c2dd75 Iustin Pop
        "vcpus": iinfo.vcpus,
5027 d1c2dd75 Iustin Pop
        "memory": iinfo.memory,
5028 d1c2dd75 Iustin Pop
        "os": iinfo.os,
5029 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
5030 d1c2dd75 Iustin Pop
        "nics": nic_data,
5031 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
5032 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
5033 d1c2dd75 Iustin Pop
        }
5034 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
5035 d61df03e Iustin Pop
5036 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
5037 d61df03e Iustin Pop
5038 d1c2dd75 Iustin Pop
    self.in_data = data
5039 d61df03e Iustin Pop
5040 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
5041 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
5042 d61df03e Iustin Pop

5043 d1c2dd75 Iustin Pop
    This in combination with _AllocatorGetClusterData will create the
5044 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5045 d61df03e Iustin Pop

5046 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5047 d1c2dd75 Iustin Pop
    done.
5048 d61df03e Iustin Pop

5049 d1c2dd75 Iustin Pop
    """
5050 d1c2dd75 Iustin Pop
    data = self.in_data
5051 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
5052 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
5053 d1c2dd75 Iustin Pop
5054 d1c2dd75 Iustin Pop
    disk_space = _ComputeDiskSize(self.disk_template,
5055 d1c2dd75 Iustin Pop
                                  self.disks[0]["size"], self.disks[1]["size"])
5056 d1c2dd75 Iustin Pop
5057 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
5058 27579978 Iustin Pop
      self.required_nodes = 2
5059 27579978 Iustin Pop
    else:
5060 27579978 Iustin Pop
      self.required_nodes = 1
5061 d1c2dd75 Iustin Pop
    request = {
5062 d1c2dd75 Iustin Pop
      "type": "allocate",
5063 d1c2dd75 Iustin Pop
      "name": self.name,
5064 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
5065 d1c2dd75 Iustin Pop
      "tags": self.tags,
5066 d1c2dd75 Iustin Pop
      "os": self.os,
5067 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
5068 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
5069 d1c2dd75 Iustin Pop
      "disks": self.disks,
5070 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
5071 d1c2dd75 Iustin Pop
      "nics": self.nics,
5072 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5073 d1c2dd75 Iustin Pop
      }
5074 d1c2dd75 Iustin Pop
    data["request"] = request
5075 298fe380 Iustin Pop
5076 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _IAllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    Raises:
      errors.ProgrammerError: if the named instance does not exist
      errors.OpPrereqError: if the instance is not net-mirrored, does not
        have exactly one secondary node, or does not have exactly two disks

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    # _ComputeDiskSize below reads both disks unconditionally; check
    # explicitly (same restriction as _AddNewInstance) instead of
    # failing with an IndexError on oddly-configured instances
    if len(instance.disks) != 2:
      raise errors.OpPrereqError("Only two-disk configurations supported")

    self.required_nodes = 1

    disk_space = _ComputeDiskSize(instance.disk_template,
                                  instance.disks[0].size,
                                  instance.disks[1].size)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request
  def _BuildInputData(self):
    """Assemble the full allocator input and serialize it.

    Gathers the cluster-wide data, adds the per-request section for the
    current mode, and stores the serialized text in self.in_text.

    """
    self._ComputeClusterData()

    # pick the request builder matching the allocator mode
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      add_fn = self._AddNewInstance
    else:
      add_fn = self._AddRelocateInstance
    add_fn()

    self.in_text = serializer.Dump(self.in_data)
  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
    """Run an instance allocator and return the results.

    Args:
      name: the name of the allocator script to run on the master node
      validate: whether to run _ValidateResult() on the script's output
      call_fn: the RPC callable used to invoke the allocator; a keyword
        default so tests can inject a stub

    Raises:
      errors.OpExecError: if the RPC result is malformed, the allocator
        script was not found, or the allocator run failed

    """
    # NOTE: previously an unused local ("data = self.in_text") shadowed
    # the attribute here; self.in_text is passed directly instead
    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)

    if not isinstance(result, (list, tuple)) or len(result) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()
  def _ValidateResult(self):
5148 d1c2dd75 Iustin Pop
    """Process the allocator results.
5149 538475ca Iustin Pop

5150 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5151 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5152 538475ca Iustin Pop

5153 d1c2dd75 Iustin Pop
    """
5154 d1c2dd75 Iustin Pop
    try:
5155 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5156 d1c2dd75 Iustin Pop
    except Exception, err:
5157 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5158 d1c2dd75 Iustin Pop
5159 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5160 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5161 538475ca Iustin Pop
5162 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5163 d1c2dd75 Iustin Pop
      if key not in rdict:
5164 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5165 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5166 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5167 538475ca Iustin Pop
5168 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5169 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5170 d1c2dd75 Iustin Pop
                               " is not a list")
5171 d1c2dd75 Iustin Pop
    self.out_data = rdict
5172 538475ca Iustin Pop
5173 538475ca Iustin Pop
5174 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # all allocation attributes must be present on the opcode
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for nic in self.op.nics:
        nic_ok = isinstance(nic, dict)
        if nic_ok:
          for key in ("mac", "ip", "bridge"):
            if key not in nic:
              nic_ok = False
        if not nic_ok:
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for disk in self.op.disks:
        disk_ok = (isinstance(disk, dict) and
                   "size" in disk and
                   isinstance(disk["size"], int) and
                   disk.get("mode") in ['r', 'w'])
        if not disk_ok:
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      full_name = self.cfg.ExpandInstanceName(self.op.name)
      if full_name is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = full_name
      self.relocate_from = self.cfg.GetInstanceInfo(full_name).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      # running the external allocator requires its name
      if getattr(self.op, "allocator", None) is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    kwargs = {
      "mode": self.op.mode,
      "name": self.op.name,
      }
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      kwargs.update({
        "mem_size": self.op.mem_size,
        "disks": self.op.disks,
        "disk_template": self.op.disk_template,
        "os": self.op.os,
        "tags": self.op.tags,
        "nics": self.op.nics,
        "vcpus": self.op.vcpus,
        })
    else:
      kwargs["relocate_from"] = list(self.relocate_from)
    ial = IAllocator(self.cfg, self.sstore, **kwargs)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      return ial.in_text
    ial.Run(self.op.allocator, validate=False)
    return ial.out_text