# lib/cmdlib.py @ f6bd6e98

#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform
import logging

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_WSSTORE: the LU needs a writable SimpleStore
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True
  REQ_WSSTORE = False
  REQ_BGL = True

  def __init__(self, processor, op, context, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.sstore = sstore
    self.context = context
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.sstore)
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this will
    be handled in the hooks runner. Also note that additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    Empty node lists should be returned as empty lists (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


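# Illustrative sketch added by the editor, not part of the original module: a
# minimal concurrent (REQ_BGL = False) LU wired up with the locking helpers
# above. The class and its "instance_name" opcode field are hypothetical and
# nothing registers or uses them; the sketch only shows the
# ExpandNames/DeclareLocks protocol described in the LogicalUnit docstrings.
class _ExampleLockedInstanceLU(NoHooksLU):
  """Sketch of an LU that locks one instance and, later, its nodes."""
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # Expand self.op.instance_name and declare it at the instance level.
    self._ExpandAndLockInstance()
    # Node locks can't be named yet; ask for them to be recalculated once the
    # instance lock is held.
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    pass

  def Exec(self, feedback_fn):
    feedback_fn("Locked instance: %s" % self.op.instance_name)

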
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: List of nodes (strings) or None for all

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))

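# Illustrative usage sketch added by the editor (not in the original module):
# a query-style LU would typically validate the requested output fields in its
# CheckPrereq along these lines (the field names here are made up):
#
#   _CheckOutputFields(static=["name", "pnode", "snodes"],
#                      dynamic=["oper_state", "oper_ram"],
#                      selected=self.op.output_fields)
#
# which raises errors.OpPrereqError if self.op.output_fields contains a field
# that is neither static nor dynamic.

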
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks from single variables.

  Args:
    secondary_nodes: List of secondary nodes as strings
  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env


def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override
  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)

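# Illustrative sketch added by the editor (not in the original module): for a
# single-NIC instance the helpers above produce an environment roughly like
# the dict below (all values made up); the hooks runner later prefixes every
# key with "GANETI_":
#
#   {"OP_TARGET": "inst1.example.com", "INSTANCE_NAME": "inst1.example.com",
#    "INSTANCE_PRIMARY": "node1.example.com",
#    "INSTANCE_SECONDARIES": "node2.example.com",
#    "INSTANCE_OS_TYPE": "debian-etch", "INSTANCE_STATUS": "up",
#    "INSTANCE_MEMORY": 512, "INSTANCE_VCPUS": 1,
#    "INSTANCE_NIC0_IP": "", "INSTANCE_NIC0_BRIDGE": "xen-br0",
#    "INSTANCE_NIC0_HWADDR": "aa:00:00:11:22:33",
#    "INSTANCE_NIC_COUNT": 1}

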
def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if (node_current not in node_instance or
          not instance in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          needed_mem += instance_cfg[instance].memory
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

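  # Worked example added by the editor (numbers made up, not in the original
  # module): if node2 is secondary for inst1 (512 MB) and inst2 (1024 MB),
  # both having node1 as primary, then on node2 'sinst-by-pnode' is
  # {'node1': ['inst1', 'inst2']} and the check above requires node2's
  # 'mfree' to be at least 1536 MB, i.e. enough free memory to absorb a
  # failover of everything node2 secondaries for node1.
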
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks run only in the post phase; their failure is logged
    in the verify output and makes the verification fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
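    # Editor's note: the chained assignment above builds the result tuple and
    # keeps named references to its four members: nodes that could not be
    # queried, per-node LVM errors, instances with offline volumes, and
    # per-instance lists of missing logical volumes.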
949 2c95a8d4 Iustin Pop
950 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
951 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
952 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
953 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
954 2c95a8d4 Iustin Pop
955 2c95a8d4 Iustin Pop
    nv_dict = {}
956 2c95a8d4 Iustin Pop
    for inst in instances:
957 2c95a8d4 Iustin Pop
      inst_lvs = {}
958 2c95a8d4 Iustin Pop
      if (inst.status != "up" or
959 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
960 2c95a8d4 Iustin Pop
        continue
961 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
962 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
963 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
964 2c95a8d4 Iustin Pop
        for vol in vol_list:
965 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
966 2c95a8d4 Iustin Pop
967 2c95a8d4 Iustin Pop
    if not nv_dict:
968 2c95a8d4 Iustin Pop
      return result
969 2c95a8d4 Iustin Pop
970 2c95a8d4 Iustin Pop
    node_lvs = rpc.call_volume_list(nodes, vg_name)
971 2c95a8d4 Iustin Pop
972 2c95a8d4 Iustin Pop
    to_act = set()
973 2c95a8d4 Iustin Pop
    for node in nodes:
974 2c95a8d4 Iustin Pop
      # node_volume
975 2c95a8d4 Iustin Pop
      lvs = node_lvs[node]
976 2c95a8d4 Iustin Pop
977 b63ed789 Iustin Pop
      if isinstance(lvs, basestring):
978 b63ed789 Iustin Pop
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
979 b63ed789 Iustin Pop
        res_nlvm[node] = lvs
980 b63ed789 Iustin Pop
      elif not isinstance(lvs, dict):
981 2c95a8d4 Iustin Pop
        logger.Info("connection to node %s failed or invalid data returned" %
982 2c95a8d4 Iustin Pop
                    (node,))
983 2c95a8d4 Iustin Pop
        res_nodes.append(node)
984 2c95a8d4 Iustin Pop
        continue
985 2c95a8d4 Iustin Pop
986 2c95a8d4 Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
987 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
988 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
989 b63ed789 Iustin Pop
            and inst.name not in res_instances):
990 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
991 2c95a8d4 Iustin Pop
992 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
993 b63ed789 Iustin Pop
    # data better
994 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
995 b63ed789 Iustin Pop
      if inst.name not in res_missing:
996 b63ed789 Iustin Pop
        res_missing[inst.name] = []
997 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
998 b63ed789 Iustin Pop
999 2c95a8d4 Iustin Pop
    return result
1000 2c95a8d4 Iustin Pop
1001 2c95a8d4 Iustin Pop
1002 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1003 07bd8a51 Iustin Pop
  """Rename the cluster.
1004 07bd8a51 Iustin Pop

1005 07bd8a51 Iustin Pop
  """
1006 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1007 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1008 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1009 05f86716 Guido Trotter
  REQ_WSSTORE = True
1010 07bd8a51 Iustin Pop
1011 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1012 07bd8a51 Iustin Pop
    """Build hooks env.
1013 07bd8a51 Iustin Pop

1014 07bd8a51 Iustin Pop
    """
1015 07bd8a51 Iustin Pop
    env = {
1016 488b540d Iustin Pop
      "OP_TARGET": self.sstore.GetClusterName(),
1017 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1018 07bd8a51 Iustin Pop
      }
1019 07bd8a51 Iustin Pop
    mn = self.sstore.GetMasterNode()
1020 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1021 07bd8a51 Iustin Pop
1022 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1023 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1024 07bd8a51 Iustin Pop

1025 07bd8a51 Iustin Pop
    """
1026 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1027 07bd8a51 Iustin Pop
1028 bcf043c9 Iustin Pop
    new_name = hostname.name
1029 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1030 07bd8a51 Iustin Pop
    old_name = self.sstore.GetClusterName()
1031 07bd8a51 Iustin Pop
    old_ip = self.sstore.GetMasterIP()
1032 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1033 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1034 07bd8a51 Iustin Pop
                                 " cluster has changed")
1035 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1036 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1037 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1038 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1039 07bd8a51 Iustin Pop
                                   new_ip)
1040 07bd8a51 Iustin Pop
1041 07bd8a51 Iustin Pop
    self.op.name = new_name
1042 07bd8a51 Iustin Pop
1043 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1044 07bd8a51 Iustin Pop
    """Rename the cluster.
1045 07bd8a51 Iustin Pop

1046 07bd8a51 Iustin Pop
    """
1047 07bd8a51 Iustin Pop
    clustername = self.op.name
1048 07bd8a51 Iustin Pop
    ip = self.ip
1049 07bd8a51 Iustin Pop
    ss = self.sstore
1050 07bd8a51 Iustin Pop
1051 07bd8a51 Iustin Pop
    # shutdown the master IP
1052 07bd8a51 Iustin Pop
    master = ss.GetMasterNode()
1053 1c65840b Iustin Pop
    if not rpc.call_node_stop_master(master, False):
1054 07bd8a51 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
1055 07bd8a51 Iustin Pop
1056 07bd8a51 Iustin Pop
    try:
1057 07bd8a51 Iustin Pop
      # modify the sstore
1058 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_MASTER_IP, ip)
1059 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1060 07bd8a51 Iustin Pop
1061 07bd8a51 Iustin Pop
      # Distribute updated ss config to all nodes
1062 07bd8a51 Iustin Pop
      myself = self.cfg.GetNodeInfo(master)
1063 07bd8a51 Iustin Pop
      dist_nodes = self.cfg.GetNodeList()
1064 07bd8a51 Iustin Pop
      if myself.name in dist_nodes:
1065 07bd8a51 Iustin Pop
        dist_nodes.remove(myself.name)
1066 07bd8a51 Iustin Pop
1067 07bd8a51 Iustin Pop
      logger.Debug("Copying updated ssconf data to all nodes")
1068 07bd8a51 Iustin Pop
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1069 07bd8a51 Iustin Pop
        fname = ss.KeyToFilename(keyname)
1070 07bd8a51 Iustin Pop
        result = rpc.call_upload_file(dist_nodes, fname)
1071 07bd8a51 Iustin Pop
        for to_node in dist_nodes:
1072 07bd8a51 Iustin Pop
          if not result[to_node]:
1073 07bd8a51 Iustin Pop
            logger.Error("copy of file %s to node %s failed" %
1074 07bd8a51 Iustin Pop
                         (fname, to_node))
1075 07bd8a51 Iustin Pop
    finally:
1076 1c65840b Iustin Pop
      if not rpc.call_node_start_master(master, False):
1077 f4bc1f2c Michael Hanselmann
        logger.Error("Could not re-enable the master role on the master,"
1078 f4bc1f2c Michael Hanselmann
                     " please restart manually.")
1079 07bd8a51 Iustin Pop
1080 07bd8a51 Iustin Pop
1081 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1082 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1083 8084f9f6 Manuel Franceschini

1084 8084f9f6 Manuel Franceschini
  Args:
1085 8084f9f6 Manuel Franceschini
    disk: ganeti.objects.Disk object
1086 8084f9f6 Manuel Franceschini

1087 8084f9f6 Manuel Franceschini
  Returns:
1088 8084f9f6 Manuel Franceschini
    boolean indicating whether an LD_LV dev_type was found or not
1089 8084f9f6 Manuel Franceschini

1090 8084f9f6 Manuel Franceschini
  """
1091 8084f9f6 Manuel Franceschini
  if disk.children:
1092 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1093 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1094 8084f9f6 Manuel Franceschini
        return True
1095 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
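# Illustrative sketch (commented out, not executed): a DRBD disk whose
# children are two logical volumes makes _RecursiveCheckIfLVMBased return
# True through the recursion on its children; the exact object layout below
# is only an assumption for the example.
#
#   lv_data = objects.Disk(dev_type=constants.LD_LV, size=1024)
#   lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128)
#   drbd = objects.Disk(dev_type=constants.LD_DRBD8, size=1024,
#                       children=[lv_data, lv_meta])
#   assert _RecursiveCheckIfLVMBased(drbd)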
1096 8084f9f6 Manuel Franceschini
1097 8084f9f6 Manuel Franceschini
1098 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1099 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1100 8084f9f6 Manuel Franceschini

1101 8084f9f6 Manuel Franceschini
  """
1102 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1103 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1104 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1105 c53279cf Guido Trotter
  REQ_BGL = False
1106 c53279cf Guido Trotter
1107 c53279cf Guido Trotter
  def ExpandNames(self):
1108 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
1109 c53279cf Guido Trotter
    # all nodes to be modified.
1110 c53279cf Guido Trotter
    self.needed_locks = {
1111 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1112 c53279cf Guido Trotter
    }
1113 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1114 8084f9f6 Manuel Franceschini
1115 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1116 8084f9f6 Manuel Franceschini
    """Build hooks env.
1117 8084f9f6 Manuel Franceschini

1118 8084f9f6 Manuel Franceschini
    """
1119 8084f9f6 Manuel Franceschini
    env = {
1120 8084f9f6 Manuel Franceschini
      "OP_TARGET": self.sstore.GetClusterName(),
1121 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1122 8084f9f6 Manuel Franceschini
      }
1123 8084f9f6 Manuel Franceschini
    mn = self.sstore.GetMasterNode()
1124 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1125 8084f9f6 Manuel Franceschini
1126 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1127 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1128 8084f9f6 Manuel Franceschini

1129 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1130 5f83e263 Iustin Pop
    if the given volume group is valid.
1131 8084f9f6 Manuel Franceschini

1132 8084f9f6 Manuel Franceschini
    """
1133 c53279cf Guido Trotter
    # FIXME: This only works because there is only one parameter that can be
1134 c53279cf Guido Trotter
    # changed or removed.
1135 8084f9f6 Manuel Franceschini
    if not self.op.vg_name:
1136 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1137 8084f9f6 Manuel Franceschini
      for inst in instances:
1138 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1139 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1140 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1141 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1142 8084f9f6 Manuel Franceschini
1143 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1144 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1145 c53279cf Guido Trotter
      node_list = self.acquired_locks[locking.LEVEL_NODE]
1146 8084f9f6 Manuel Franceschini
      vglist = rpc.call_vg_list(node_list)
1147 8084f9f6 Manuel Franceschini
      for node in node_list:
1148 8d1a2a64 Michael Hanselmann
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
1149 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1150 8084f9f6 Manuel Franceschini
        if vgstatus:
1151 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1152 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1153 8084f9f6 Manuel Franceschini
1154 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1155 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1156 8084f9f6 Manuel Franceschini

1157 8084f9f6 Manuel Franceschini
    """
1158 8084f9f6 Manuel Franceschini
    if self.op.vg_name != self.cfg.GetVGName():
1159 8084f9f6 Manuel Franceschini
      self.cfg.SetVGName(self.op.vg_name)
1160 8084f9f6 Manuel Franceschini
    else:
1161 8084f9f6 Manuel Franceschini
      feedback_fn("Cluster LVM configuration already in desired"
1162 8084f9f6 Manuel Franceschini
                  " state, not changing")
1163 8084f9f6 Manuel Franceschini
1164 8084f9f6 Manuel Franceschini
1165 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
1166 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1167 a8083063 Iustin Pop

1168 a8083063 Iustin Pop
  """
1169 a8083063 Iustin Pop
  if not instance.disks:
1170 a8083063 Iustin Pop
    return True
1171 a8083063 Iustin Pop
1172 a8083063 Iustin Pop
  if not oneshot:
1173 5bfac263 Iustin Pop
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1174 a8083063 Iustin Pop
1175 a8083063 Iustin Pop
  node = instance.primary_node
1176 a8083063 Iustin Pop
1177 a8083063 Iustin Pop
  for dev in instance.disks:
1178 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
1179 a8083063 Iustin Pop
1180 a8083063 Iustin Pop
  retries = 0
1181 a8083063 Iustin Pop
  while True:
1182 a8083063 Iustin Pop
    max_time = 0
1183 a8083063 Iustin Pop
    done = True
1184 a8083063 Iustin Pop
    cumul_degraded = False
1185 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1186 a8083063 Iustin Pop
    if not rstats:
1187 5bfac263 Iustin Pop
      proc.LogWarning("Can't get any data from node %s" % node)
1188 a8083063 Iustin Pop
      retries += 1
1189 a8083063 Iustin Pop
      if retries >= 10:
1190 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1191 3ecf6786 Iustin Pop
                                 " aborting." % node)
1192 a8083063 Iustin Pop
      time.sleep(6)
1193 a8083063 Iustin Pop
      continue
1194 a8083063 Iustin Pop
    retries = 0
1195 a8083063 Iustin Pop
    for i in range(len(rstats)):
1196 a8083063 Iustin Pop
      mstat = rstats[i]
1197 a8083063 Iustin Pop
      if mstat is None:
1198 5bfac263 Iustin Pop
        proc.LogWarning("Can't compute data for node %s/%s" %
1199 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
1200 a8083063 Iustin Pop
        continue
1201 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1202 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
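      # mstat layout as used here: (sync percentage or None, estimated
      # seconds remaining or None, overall is_degraded flag, local-disk
      # status); the last field is the ldisk value ignored above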
1203 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1204 a8083063 Iustin Pop
      if perc_done is not None:
1205 a8083063 Iustin Pop
        done = False
1206 a8083063 Iustin Pop
        if est_time is not None:
1207 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1208 a8083063 Iustin Pop
          max_time = est_time
1209 a8083063 Iustin Pop
        else:
1210 a8083063 Iustin Pop
          rem_time = "no time estimate"
1211 5bfac263 Iustin Pop
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
1212 5bfac263 Iustin Pop
                     (instance.disks[i].iv_name, perc_done, rem_time))
1213 a8083063 Iustin Pop
    if done or oneshot:
1214 a8083063 Iustin Pop
      break
1215 a8083063 Iustin Pop
1216 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1217 a8083063 Iustin Pop
1218 a8083063 Iustin Pop
  if done:
1219 5bfac263 Iustin Pop
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1220 a8083063 Iustin Pop
  return not cumul_degraded
1221 a8083063 Iustin Pop
1222 a8083063 Iustin Pop
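# Illustrative (commented-out) call site, assuming an LU with the usual
# self.cfg and self.proc attributes and an instance object in hand:
#
#   if not _WaitForSync(self.cfg, instance, self.proc):
#     raise errors.OpExecError("Disks of instance %s are degraded" %
#                              instance.name)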
1223 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1224 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1225 a8083063 Iustin Pop

1226 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1227 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1228 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1229 0834c866 Iustin Pop

1230 a8083063 Iustin Pop
  """
1231 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1232 0834c866 Iustin Pop
  if ldisk:
1233 0834c866 Iustin Pop
    idx = 6
1234 0834c866 Iustin Pop
  else:
1235 0834c866 Iustin Pop
    idx = 5
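  # idx picks the field of the rpc.call_blockdev_find result examined below:
  # field 5 is the overall is_degraded flag, field 6 the ldisk (local
  # storage) status described in the docstring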
1236 a8083063 Iustin Pop
1237 a8083063 Iustin Pop
  result = True
1238 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1239 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1240 a8083063 Iustin Pop
    if not rstats:
1241 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1242 a8083063 Iustin Pop
      result = False
1243 a8083063 Iustin Pop
    else:
1244 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1245 a8083063 Iustin Pop
  if dev.children:
1246 a8083063 Iustin Pop
    for child in dev.children:
1247 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1248 a8083063 Iustin Pop
1249 a8083063 Iustin Pop
  return result
1250 a8083063 Iustin Pop
1251 a8083063 Iustin Pop
1252 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1253 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1254 a8083063 Iustin Pop

1255 a8083063 Iustin Pop
  """
1256 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1257 6bf01bbb Guido Trotter
  REQ_BGL = False
1258 a8083063 Iustin Pop
1259 6bf01bbb Guido Trotter
  def ExpandNames(self):
1260 1f9430d6 Iustin Pop
    if self.op.names:
1261 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1262 1f9430d6 Iustin Pop
1263 1f9430d6 Iustin Pop
    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
1264 1f9430d6 Iustin Pop
    _CheckOutputFields(static=[],
1265 1f9430d6 Iustin Pop
                       dynamic=self.dynamic_fields,
1266 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1267 1f9430d6 Iustin Pop
1268 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
1269 6bf01bbb Guido Trotter
    self.needed_locks = {}
1270 6bf01bbb Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1271 e310b019 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1272 6bf01bbb Guido Trotter
1273 6bf01bbb Guido Trotter
  def CheckPrereq(self):
1274 6bf01bbb Guido Trotter
    """Check prerequisites.
1275 6bf01bbb Guido Trotter

1276 6bf01bbb Guido Trotter
    """
1277 6bf01bbb Guido Trotter
1278 1f9430d6 Iustin Pop
  @staticmethod
1279 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1280 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1281 1f9430d6 Iustin Pop

1282 1f9430d6 Iustin Pop
      Args:
1283 1f9430d6 Iustin Pop
        node_list: a list with the names of all nodes
1284 1f9430d6 Iustin Pop
        rlist: a map with node names as keys and OS objects as values
1285 1f9430d6 Iustin Pop

1286 1f9430d6 Iustin Pop
      Returns:
1287 1f9430d6 Iustin Pop
        map: a map with osnames as keys and as value another map, with
1288 1f9430d6 Iustin Pop
             nodes as
1289 1f9430d6 Iustin Pop
             keys and list of OS objects as values
1290 1f9430d6 Iustin Pop
             e.g. {"debian-etch": {"node1": [<object>,...],
1291 1f9430d6 Iustin Pop
                                   "node2": [<object>,]}
1292 1f9430d6 Iustin Pop
                  }
1293 1f9430d6 Iustin Pop

1294 1f9430d6 Iustin Pop
    """
1295 1f9430d6 Iustin Pop
    all_os = {}
1296 1f9430d6 Iustin Pop
    for node_name, nr in rlist.iteritems():
1297 1f9430d6 Iustin Pop
      if not nr:
1298 1f9430d6 Iustin Pop
        continue
1299 b4de68a9 Iustin Pop
      for os_obj in nr:
1300 b4de68a9 Iustin Pop
        if os_obj.name not in all_os:
1301 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1302 1f9430d6 Iustin Pop
          # for each node in node_list
1303 b4de68a9 Iustin Pop
          all_os[os_obj.name] = {}
1304 1f9430d6 Iustin Pop
          for nname in node_list:
1305 b4de68a9 Iustin Pop
            all_os[os_obj.name][nname] = []
1306 b4de68a9 Iustin Pop
        all_os[os_obj.name][node_name].append(os_obj)
1307 1f9430d6 Iustin Pop
    return all_os
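    # Illustrative shapes only (all names are made up for the example):
    #   rlist = {"node1": [os_a, os_b], "node2": [os_a]}
    #   _DiagnoseByOS(["node1", "node2"], rlist)
    #   -> {"os_a": {"node1": [os_a], "node2": [os_a]},
    #       "os_b": {"node1": [os_b], "node2": []}}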
1308 a8083063 Iustin Pop
1309 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1310 a8083063 Iustin Pop
    """Compute the list of OSes.
1311 a8083063 Iustin Pop

1312 a8083063 Iustin Pop
    """
1313 6bf01bbb Guido Trotter
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1314 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
1315 a8083063 Iustin Pop
    if node_data == False:
1316 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1317 1f9430d6 Iustin Pop
    pol = self._DiagnoseByOS(node_list, node_data)
1318 1f9430d6 Iustin Pop
    output = []
1319 1f9430d6 Iustin Pop
    for os_name, os_data in pol.iteritems():
1320 1f9430d6 Iustin Pop
      row = []
1321 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1322 1f9430d6 Iustin Pop
        if field == "name":
1323 1f9430d6 Iustin Pop
          val = os_name
1324 1f9430d6 Iustin Pop
        elif field == "valid":
1325 1f9430d6 Iustin Pop
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1326 1f9430d6 Iustin Pop
        elif field == "node_status":
1327 1f9430d6 Iustin Pop
          val = {}
1328 1f9430d6 Iustin Pop
          for node_name, nos_list in os_data.iteritems():
1329 1f9430d6 Iustin Pop
            val[node_name] = [(v.status, v.path) for v in nos_list]
1330 1f9430d6 Iustin Pop
        else:
1331 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1332 1f9430d6 Iustin Pop
        row.append(val)
1333 1f9430d6 Iustin Pop
      output.append(row)
1334 1f9430d6 Iustin Pop
1335 1f9430d6 Iustin Pop
    return output
1336 a8083063 Iustin Pop
1337 a8083063 Iustin Pop
1338 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1339 a8083063 Iustin Pop
  """Logical unit for removing a node.
1340 a8083063 Iustin Pop

1341 a8083063 Iustin Pop
  """
1342 a8083063 Iustin Pop
  HPATH = "node-remove"
1343 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1344 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1345 a8083063 Iustin Pop
1346 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1347 a8083063 Iustin Pop
    """Build hooks env.
1348 a8083063 Iustin Pop

1349 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1350 d08869ee Guido Trotter
    node would then be impossible to remove.
1351 a8083063 Iustin Pop

1352 a8083063 Iustin Pop
    """
1353 396e1b78 Michael Hanselmann
    env = {
1354 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1355 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1356 396e1b78 Michael Hanselmann
      }
1357 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1358 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1359 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1360 a8083063 Iustin Pop
1361 a8083063 Iustin Pop
  def CheckPrereq(self):
1362 a8083063 Iustin Pop
    """Check prerequisites.
1363 a8083063 Iustin Pop

1364 a8083063 Iustin Pop
    This checks:
1365 a8083063 Iustin Pop
     - the node exists in the configuration
1366 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1367 a8083063 Iustin Pop
     - it's not the master
1368 a8083063 Iustin Pop

1369 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1370 a8083063 Iustin Pop

1371 a8083063 Iustin Pop
    """
1372 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1373 a8083063 Iustin Pop
    if node is None:
1374 a02bc76e Iustin Pop
      raise errors.OpPrereqError, ("Node '%s' is unknown." % self.op.node_name)
1375 a8083063 Iustin Pop
1376 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1377 a8083063 Iustin Pop
1378 880478f8 Iustin Pop
    masternode = self.sstore.GetMasterNode()
1379 a8083063 Iustin Pop
    if node.name == masternode:
1380 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1381 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1382 a8083063 Iustin Pop
1383 a8083063 Iustin Pop
    for instance_name in instance_list:
1384 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1385 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1386 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1387 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1388 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1389 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1390 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1391 a8083063 Iustin Pop
    self.op.node_name = node.name
1392 a8083063 Iustin Pop
    self.node = node
1393 a8083063 Iustin Pop
1394 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1395 a8083063 Iustin Pop
    """Removes the node from the cluster.
1396 a8083063 Iustin Pop

1397 a8083063 Iustin Pop
    """
1398 a8083063 Iustin Pop
    node = self.node
1399 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1400 a8083063 Iustin Pop
                node.name)
1401 a8083063 Iustin Pop
1402 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
1403 a8083063 Iustin Pop
1404 d8470559 Michael Hanselmann
    rpc.call_node_leave_cluster(node.name)
1405 c8a0948f Michael Hanselmann
1406 a8083063 Iustin Pop
1407 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1408 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1409 a8083063 Iustin Pop

1410 a8083063 Iustin Pop
  """
1411 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1412 35705d8f Guido Trotter
  REQ_BGL = False
1413 a8083063 Iustin Pop
1414 35705d8f Guido Trotter
  def ExpandNames(self):
1415 e8a4c138 Iustin Pop
    self.dynamic_fields = frozenset([
1416 e8a4c138 Iustin Pop
      "dtotal", "dfree",
1417 e8a4c138 Iustin Pop
      "mtotal", "mnode", "mfree",
1418 e8a4c138 Iustin Pop
      "bootid",
1419 e8a4c138 Iustin Pop
      "ctotal",
1420 e8a4c138 Iustin Pop
      ])
1421 a8083063 Iustin Pop
1422 c8d8b4c8 Iustin Pop
    self.static_fields = frozenset([
1423 c8d8b4c8 Iustin Pop
      "name", "pinst_cnt", "sinst_cnt",
1424 c8d8b4c8 Iustin Pop
      "pinst_list", "sinst_list",
1425 c8d8b4c8 Iustin Pop
      "pip", "sip", "tags",
1426 38d7239a Iustin Pop
      "serial_no",
1427 c8d8b4c8 Iustin Pop
      ])
1428 c8d8b4c8 Iustin Pop
1429 c8d8b4c8 Iustin Pop
    _CheckOutputFields(static=self.static_fields,
1430 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
1431 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1432 a8083063 Iustin Pop
1433 35705d8f Guido Trotter
    self.needed_locks = {}
1434 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1435 c8d8b4c8 Iustin Pop
1436 c8d8b4c8 Iustin Pop
    if self.op.names:
1437 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
1438 35705d8f Guido Trotter
    else:
1439 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
1440 c8d8b4c8 Iustin Pop
1441 c8d8b4c8 Iustin Pop
    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
1442 c8d8b4c8 Iustin Pop
    if self.do_locking:
1443 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
1444 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
1445 c8d8b4c8 Iustin Pop
1446 35705d8f Guido Trotter
1447 35705d8f Guido Trotter
  def CheckPrereq(self):
1448 35705d8f Guido Trotter
    """Check prerequisites.
1449 35705d8f Guido Trotter

1450 35705d8f Guido Trotter
    """
1451 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in _GetWantedNodes if the
1452 c8d8b4c8 Iustin Pop
    # list is non-empty; if it is empty, there is no validation to do
1453 c8d8b4c8 Iustin Pop
    pass
1454 a8083063 Iustin Pop
1455 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1456 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1457 a8083063 Iustin Pop

1458 a8083063 Iustin Pop
    """
1459 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
1460 c8d8b4c8 Iustin Pop
    if self.do_locking:
1461 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
1462 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
1463 3fa93523 Guido Trotter
      nodenames = self.wanted
1464 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
1465 3fa93523 Guido Trotter
      if missing:
1466 3fa93523 Guido Trotter
        raise errors.OpExecError(
1467 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
1468 c8d8b4c8 Iustin Pop
    else:
1469 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
1470 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
1471 a8083063 Iustin Pop
1472 a8083063 Iustin Pop
    # begin data gathering
1473 a8083063 Iustin Pop
1474 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
1475 a8083063 Iustin Pop
      live_data = {}
1476 a8083063 Iustin Pop
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
1477 a8083063 Iustin Pop
      for name in nodenames:
1478 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1479 a8083063 Iustin Pop
        if nodeinfo:
1480 a8083063 Iustin Pop
          live_data[name] = {
1481 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1482 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1483 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1484 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1485 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1486 e8a4c138 Iustin Pop
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
1487 3ef10550 Michael Hanselmann
            "bootid": nodeinfo['bootid'],
1488 a8083063 Iustin Pop
            }
1489 a8083063 Iustin Pop
        else:
1490 a8083063 Iustin Pop
          live_data[name] = {}
1491 a8083063 Iustin Pop
    else:
1492 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1493 a8083063 Iustin Pop
1494 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1495 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
1496 a8083063 Iustin Pop
1497 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1498 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1499 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1500 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1501 a8083063 Iustin Pop
1502 ec223efb Iustin Pop
      for instance_name in instancelist:
1503 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1504 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1505 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1506 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1507 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1508 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1509 a8083063 Iustin Pop
1510 a8083063 Iustin Pop
    # end data gathering
1511 a8083063 Iustin Pop
1512 a8083063 Iustin Pop
    output = []
1513 a8083063 Iustin Pop
    for node in nodelist:
1514 a8083063 Iustin Pop
      node_output = []
1515 a8083063 Iustin Pop
      for field in self.op.output_fields:
1516 a8083063 Iustin Pop
        if field == "name":
1517 a8083063 Iustin Pop
          val = node.name
1518 ec223efb Iustin Pop
        elif field == "pinst_list":
1519 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1520 ec223efb Iustin Pop
        elif field == "sinst_list":
1521 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1522 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1523 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1524 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1525 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1526 a8083063 Iustin Pop
        elif field == "pip":
1527 a8083063 Iustin Pop
          val = node.primary_ip
1528 a8083063 Iustin Pop
        elif field == "sip":
1529 a8083063 Iustin Pop
          val = node.secondary_ip
1530 130a6a6f Iustin Pop
        elif field == "tags":
1531 130a6a6f Iustin Pop
          val = list(node.GetTags())
1532 38d7239a Iustin Pop
        elif field == "serial_no":
1533 38d7239a Iustin Pop
          val = node.serial_no
1534 a8083063 Iustin Pop
        elif field in self.dynamic_fields:
1535 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1536 a8083063 Iustin Pop
        else:
1537 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1538 a8083063 Iustin Pop
        node_output.append(val)
1539 a8083063 Iustin Pop
      output.append(node_output)
1540 a8083063 Iustin Pop
1541 a8083063 Iustin Pop
    return output
1542 a8083063 Iustin Pop
1543 a8083063 Iustin Pop
1544 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1545 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1546 dcb93971 Michael Hanselmann

1547 dcb93971 Michael Hanselmann
  """
1548 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1549 21a15682 Guido Trotter
  REQ_BGL = False
1550 21a15682 Guido Trotter
1551 21a15682 Guido Trotter
  def ExpandNames(self):
1552 21a15682 Guido Trotter
    _CheckOutputFields(static=["node"],
1553 21a15682 Guido Trotter
                       dynamic=["phys", "vg", "name", "size", "instance"],
1554 21a15682 Guido Trotter
                       selected=self.op.output_fields)
1555 21a15682 Guido Trotter
1556 21a15682 Guido Trotter
    self.needed_locks = {}
1557 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1558 21a15682 Guido Trotter
    if not self.op.nodes:
1559 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1560 21a15682 Guido Trotter
    else:
1561 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
1562 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
1563 dcb93971 Michael Hanselmann
1564 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1565 dcb93971 Michael Hanselmann
    """Check prerequisites.
1566 dcb93971 Michael Hanselmann

1567 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1568 dcb93971 Michael Hanselmann

1569 dcb93971 Michael Hanselmann
    """
1570 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
1571 dcb93971 Michael Hanselmann
1572 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1573 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1574 dcb93971 Michael Hanselmann

1575 dcb93971 Michael Hanselmann
    """
1576 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1577 dcb93971 Michael Hanselmann
    volumes = rpc.call_node_volumes(nodenames)
1578 dcb93971 Michael Hanselmann
1579 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1580 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1581 dcb93971 Michael Hanselmann
1582 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1583 dcb93971 Michael Hanselmann
1584 dcb93971 Michael Hanselmann
    output = []
1585 dcb93971 Michael Hanselmann
    for node in nodenames:
1586 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1587 37d19eb2 Michael Hanselmann
        continue
1588 37d19eb2 Michael Hanselmann
1589 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1590 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1591 dcb93971 Michael Hanselmann
1592 dcb93971 Michael Hanselmann
      for vol in node_vols:
1593 dcb93971 Michael Hanselmann
        node_output = []
1594 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1595 dcb93971 Michael Hanselmann
          if field == "node":
1596 dcb93971 Michael Hanselmann
            val = node
1597 dcb93971 Michael Hanselmann
          elif field == "phys":
1598 dcb93971 Michael Hanselmann
            val = vol['dev']
1599 dcb93971 Michael Hanselmann
          elif field == "vg":
1600 dcb93971 Michael Hanselmann
            val = vol['vg']
1601 dcb93971 Michael Hanselmann
          elif field == "name":
1602 dcb93971 Michael Hanselmann
            val = vol['name']
1603 dcb93971 Michael Hanselmann
          elif field == "size":
1604 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1605 dcb93971 Michael Hanselmann
          elif field == "instance":
1606 dcb93971 Michael Hanselmann
            for inst in ilist:
1607 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1608 dcb93971 Michael Hanselmann
                continue
1609 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1610 dcb93971 Michael Hanselmann
                val = inst.name
1611 dcb93971 Michael Hanselmann
                break
1612 dcb93971 Michael Hanselmann
            else:
1613 dcb93971 Michael Hanselmann
              val = '-'
1614 dcb93971 Michael Hanselmann
          else:
1615 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1616 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1617 dcb93971 Michael Hanselmann
1618 dcb93971 Michael Hanselmann
        output.append(node_output)
1619 dcb93971 Michael Hanselmann
1620 dcb93971 Michael Hanselmann
    return output
1621 dcb93971 Michael Hanselmann
1622 dcb93971 Michael Hanselmann
1623 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1624 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1625 a8083063 Iustin Pop

1626 a8083063 Iustin Pop
  """
1627 a8083063 Iustin Pop
  HPATH = "node-add"
1628 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1629 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1630 a8083063 Iustin Pop
1631 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1632 a8083063 Iustin Pop
    """Build hooks env.
1633 a8083063 Iustin Pop

1634 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1635 a8083063 Iustin Pop

1636 a8083063 Iustin Pop
    """
1637 a8083063 Iustin Pop
    env = {
1638 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1639 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1640 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1641 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1642 a8083063 Iustin Pop
      }
1643 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1644 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1645 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1646 a8083063 Iustin Pop
1647 a8083063 Iustin Pop
  def CheckPrereq(self):
1648 a8083063 Iustin Pop
    """Check prerequisites.
1649 a8083063 Iustin Pop

1650 a8083063 Iustin Pop
    This checks:
1651 a8083063 Iustin Pop
     - the new node is not already in the config
1652 a8083063 Iustin Pop
     - it is resolvable
1653 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
1654 a8083063 Iustin Pop

1655 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1656 a8083063 Iustin Pop

1657 a8083063 Iustin Pop
    """
1658 a8083063 Iustin Pop
    node_name = self.op.node_name
1659 a8083063 Iustin Pop
    cfg = self.cfg
1660 a8083063 Iustin Pop
1661 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1662 a8083063 Iustin Pop
1663 bcf043c9 Iustin Pop
    node = dns_data.name
1664 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1665 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1666 a8083063 Iustin Pop
    if secondary_ip is None:
1667 a8083063 Iustin Pop
      secondary_ip = primary_ip
1668 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1669 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1670 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1671 e7c6e02b Michael Hanselmann
1672 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1673 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
1674 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1675 e7c6e02b Michael Hanselmann
                                 node)
1676 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
1677 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1678 a8083063 Iustin Pop
1679 a8083063 Iustin Pop
    for existing_node_name in node_list:
1680 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1681 e7c6e02b Michael Hanselmann
1682 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
1683 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
1684 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
1685 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1686 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
1687 e7c6e02b Michael Hanselmann
        continue
1688 e7c6e02b Michael Hanselmann
1689 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1690 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1691 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1692 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1693 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1694 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1695 a8083063 Iustin Pop
1696 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1697 a8083063 Iustin Pop
    # same as for the master
1698 880478f8 Iustin Pop
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
1699 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1700 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1701 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1702 a8083063 Iustin Pop
      if master_singlehomed:
1703 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1704 3ecf6786 Iustin Pop
                                   " new node has one")
1705 a8083063 Iustin Pop
      else:
1706 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1707 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1708 a8083063 Iustin Pop
1709 a8083063 Iustin Pop
    # checks reachability
1710 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1711 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1712 a8083063 Iustin Pop
1713 a8083063 Iustin Pop
    if not newbie_singlehomed:
1714 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1715 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1716 b15d625f Iustin Pop
                           source=myself.secondary_ip):
1717 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1718 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
1719 a8083063 Iustin Pop
1720 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1721 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1722 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1723 a8083063 Iustin Pop
1724 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1725 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1726 a8083063 Iustin Pop

1727 a8083063 Iustin Pop
    """
1728 a8083063 Iustin Pop
    new_node = self.new_node
1729 a8083063 Iustin Pop
    node = new_node.name
1730 a8083063 Iustin Pop
1731 a8083063 Iustin Pop
    # check connectivity
1732 a8083063 Iustin Pop
    result = rpc.call_version([node])[node]
1733 a8083063 Iustin Pop
    if result:
1734 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1735 a8083063 Iustin Pop
        logger.Info("communication to node %s fine, sw version %s match" %
1736 a8083063 Iustin Pop
                    (node, result))
1737 a8083063 Iustin Pop
      else:
1738 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1739 3ecf6786 Iustin Pop
                                 " node version %s" %
1740 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1741 a8083063 Iustin Pop
    else:
1742 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1743 a8083063 Iustin Pop
1744 a8083063 Iustin Pop
    # setup ssh on node
1745 a8083063 Iustin Pop
    logger.Info("copy ssh key to node %s" % node)
1746 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1747 a8083063 Iustin Pop
    keyarray = []
1748 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1749 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1750 70d9e3d8 Iustin Pop
                priv_key, pub_key]
1751 a8083063 Iustin Pop
1752 a8083063 Iustin Pop
    for i in keyfiles:
1753 a8083063 Iustin Pop
      f = open(i, 'r')
1754 a8083063 Iustin Pop
      try:
1755 a8083063 Iustin Pop
        keyarray.append(f.read())
1756 a8083063 Iustin Pop
      finally:
1757 a8083063 Iustin Pop
        f.close()
1758 a8083063 Iustin Pop
1759 a8083063 Iustin Pop
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1760 a8083063 Iustin Pop
                               keyarray[3], keyarray[4], keyarray[5])
1761 a8083063 Iustin Pop
1762 a8083063 Iustin Pop
    if not result:
1763 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1764 a8083063 Iustin Pop
1765 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1766 d9c02ca6 Michael Hanselmann
    utils.AddHostToEtcHosts(new_node.name)
1767 c8a0948f Michael Hanselmann
1768 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1769 16abfbc2 Alexander Schreiber
      if not rpc.call_node_tcp_ping(new_node.name,
1770 16abfbc2 Alexander Schreiber
                                    constants.LOCALHOST_IP_ADDRESS,
1771 16abfbc2 Alexander Schreiber
                                    new_node.secondary_ip,
1772 16abfbc2 Alexander Schreiber
                                    constants.DEFAULT_NODED_PORT,
1773 16abfbc2 Alexander Schreiber
                                    10, False):
1774 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1775 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
1776 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
1777 a8083063 Iustin Pop
1778 5c0527ed Guido Trotter
    node_verify_list = [self.sstore.GetMasterNode()]
1779 5c0527ed Guido Trotter
    node_verify_param = {
1780 5c0527ed Guido Trotter
      'nodelist': [node],
1781 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
1782 5c0527ed Guido Trotter
    }
1783 5c0527ed Guido Trotter
1784 5c0527ed Guido Trotter
    result = rpc.call_node_verify(node_verify_list, node_verify_param)
1785 5c0527ed Guido Trotter
    for verifier in node_verify_list:
1786 5c0527ed Guido Trotter
      if not result[verifier]:
1787 5c0527ed Guido Trotter
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
1788 5c0527ed Guido Trotter
                                 " for remote verification" % verifier)
1789 5c0527ed Guido Trotter
      if result[verifier]['nodelist']:
1790 5c0527ed Guido Trotter
        for failed in result[verifier]['nodelist']:
1791 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
1792 5c0527ed Guido Trotter
                      (verifier, result[verifier]['nodelist'][failed]))
1793 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
1794 ff98055b Iustin Pop
1795 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1796 a8083063 Iustin Pop
    # including the node just added
1797 880478f8 Iustin Pop
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
1798 102b115b Michael Hanselmann
    dist_nodes = self.cfg.GetNodeList()
1799 102b115b Michael Hanselmann
    if not self.op.readd:
1800 102b115b Michael Hanselmann
      dist_nodes.append(node)
1801 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1802 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1803 a8083063 Iustin Pop
1804 a8083063 Iustin Pop
    logger.Debug("Copying hosts and known_hosts to all nodes")
1805 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1806 a8083063 Iustin Pop
      result = rpc.call_upload_file(dist_nodes, fname)
1807 a8083063 Iustin Pop
      for to_node in dist_nodes:
1808 a8083063 Iustin Pop
        if not result[to_node]:
1809 a8083063 Iustin Pop
          logger.Error("copy of file %s to node %s failed" %
1810 a8083063 Iustin Pop
                       (fname, to_node))
1811 a8083063 Iustin Pop
1812 3d1e7706 Guido Trotter
    to_copy = self.sstore.GetFileList()
1813 2a6469d5 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
1814 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
1815 a8083063 Iustin Pop
    for fname in to_copy:
1816 b5602d15 Guido Trotter
      result = rpc.call_upload_file([node], fname)
1817 b5602d15 Guido Trotter
      if not result[node]:
1818 a8083063 Iustin Pop
        logger.Error("could not copy file %s to node %s" % (fname, node))
1819 a8083063 Iustin Pop
1820 d8470559 Michael Hanselmann
    if self.op.readd:
1821 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
1822 d8470559 Michael Hanselmann
    else:
1823 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
1824 a8083063 Iustin Pop
1825 a8083063 Iustin Pop
1826 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1827 a8083063 Iustin Pop
  """Query cluster configuration.
1828 a8083063 Iustin Pop

1829 a8083063 Iustin Pop
  """
1830 a8083063 Iustin Pop
  _OP_REQP = []
1831 59322403 Iustin Pop
  REQ_MASTER = False
1832 642339cf Guido Trotter
  REQ_BGL = False
1833 642339cf Guido Trotter
1834 642339cf Guido Trotter
  def ExpandNames(self):
1835 642339cf Guido Trotter
    self.needed_locks = {}
1836 a8083063 Iustin Pop
1837 a8083063 Iustin Pop
  def CheckPrereq(self):
1838 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1839 a8083063 Iustin Pop

1840 a8083063 Iustin Pop
    """
1841 a8083063 Iustin Pop
    pass
1842 a8083063 Iustin Pop
1843 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1844 a8083063 Iustin Pop
    """Return cluster config.
1845 a8083063 Iustin Pop

1846 a8083063 Iustin Pop
    """
1847 a8083063 Iustin Pop
    result = {
1848 5fcdc80d Iustin Pop
      "name": self.sstore.GetClusterName(),
1849 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1850 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1851 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1852 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1853 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1854 880478f8 Iustin Pop
      "master": self.sstore.GetMasterNode(),
1855 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1856 8a12ce45 Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
1857 a8083063 Iustin Pop
      }
1858 a8083063 Iustin Pop
1859 a8083063 Iustin Pop
    return result
1860 a8083063 Iustin Pop
1861 a8083063 Iustin Pop
1862 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
1863 a8083063 Iustin Pop
  """Return a text-representation of the cluster-config.
1864 a8083063 Iustin Pop

1865 a8083063 Iustin Pop
  """
1866 a8083063 Iustin Pop
  _OP_REQP = []
1867 642339cf Guido Trotter
  REQ_BGL = False
1868 642339cf Guido Trotter
1869 642339cf Guido Trotter
  def ExpandNames(self):
1870 642339cf Guido Trotter
    self.needed_locks = {}
1871 a8083063 Iustin Pop
1872 a8083063 Iustin Pop
  def CheckPrereq(self):
1873 a8083063 Iustin Pop
    """No prerequisites.
1874 a8083063 Iustin Pop

1875 a8083063 Iustin Pop
    """
1876 a8083063 Iustin Pop
    pass
1877 a8083063 Iustin Pop
1878 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1879 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
1880 a8083063 Iustin Pop

1881 a8083063 Iustin Pop
    """
1882 a8083063 Iustin Pop
    return self.cfg.DumpConfig()
1883 a8083063 Iustin Pop
1884 a8083063 Iustin Pop
1885 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
1886 a8083063 Iustin Pop
  """Bring up an instance's disks.
1887 a8083063 Iustin Pop

1888 a8083063 Iustin Pop
  """
1889 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1890 f22a8ba3 Guido Trotter
  REQ_BGL = False
1891 f22a8ba3 Guido Trotter
1892 f22a8ba3 Guido Trotter
  def ExpandNames(self):
1893 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
1894 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
1895 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
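    # the node locks cannot be computed yet (the instance's nodes are only
    # known once its config entry has been read), so they are declared empty
    # here and filled in by _LockInstancesNodes, called from DeclareLocks
    # below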
1896 f22a8ba3 Guido Trotter
1897 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
1898 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
1899 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
1900 a8083063 Iustin Pop
1901 a8083063 Iustin Pop
  def CheckPrereq(self):
1902 a8083063 Iustin Pop
    """Check prerequisites.
1903 a8083063 Iustin Pop

1904 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1905 a8083063 Iustin Pop

1906 a8083063 Iustin Pop
    """
1907 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1908 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
1909 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
1910 a8083063 Iustin Pop
1911 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1912 a8083063 Iustin Pop
    """Activate the disks.
1913 a8083063 Iustin Pop

1914 a8083063 Iustin Pop
    """
1915 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
1916 a8083063 Iustin Pop
    if not disks_ok:
1917 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
1918 a8083063 Iustin Pop
1919 a8083063 Iustin Pop
    return disks_info
1920 a8083063 Iustin Pop
1921 a8083063 Iustin Pop
1922 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
1923 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
1924 a8083063 Iustin Pop

1925 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
1926 a8083063 Iustin Pop

1927 a8083063 Iustin Pop
  Args:
1928 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
1929 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
1930 a8083063 Iustin Pop
                        in an error return from the function
1931 a8083063 Iustin Pop

1932 a8083063 Iustin Pop
  Returns:
1933 a8083063 Iustin Pop
    false if the operation failed
1934 a8083063 Iustin Pop
    list of (host, instance_visible_name, node_visible_name) if the operation
1935 a8083063 Iustin Pop
         succeeded with the mapping from node devices to instance devices
1936 a8083063 Iustin Pop
  """
1937 a8083063 Iustin Pop
  device_info = []
1938 a8083063 Iustin Pop
  disks_ok = True
1939 fdbd668d Iustin Pop
  iname = instance.name
1940 fdbd668d Iustin Pop
  # With the two passes mechanism we try to reduce the window of
1941 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
1942 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
1943 fdbd668d Iustin Pop
1944 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
1945 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
1946 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
1947 fdbd668d Iustin Pop
  # SyncSource, etc.)
1948 fdbd668d Iustin Pop
1949 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
1950 a8083063 Iustin Pop
  for inst_disk in instance.disks:
1951 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1952 a8083063 Iustin Pop
      cfg.SetDiskID(node_disk, node)
1953 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
1954 a8083063 Iustin Pop
      if not result:
1955 f4bc1f2c Michael Hanselmann
        logger.Error("could not prepare block device %s on node %s"
1956 fdbd668d Iustin Pop
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
1957 fdbd668d Iustin Pop
        if not ignore_secondaries:
1958 a8083063 Iustin Pop
          disks_ok = False
1959 fdbd668d Iustin Pop
1960 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
1961 fdbd668d Iustin Pop
1962 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
1963 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
1964 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1965 fdbd668d Iustin Pop
      if node != instance.primary_node:
1966 fdbd668d Iustin Pop
        continue
1967 fdbd668d Iustin Pop
      cfg.SetDiskID(node_disk, node)
1968 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
1969 fdbd668d Iustin Pop
      if not result:
1970 fdbd668d Iustin Pop
        logger.Error("could not prepare block device %s on node %s"
1971 fdbd668d Iustin Pop
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
1972 fdbd668d Iustin Pop
        disks_ok = False
1973 fdbd668d Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
1974 a8083063 Iustin Pop
1975 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
1976 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
1977 b352ab5b Iustin Pop
  # improving the logical/physical id handling
1978 b352ab5b Iustin Pop
  for disk in instance.disks:
1979 b352ab5b Iustin Pop
    cfg.SetDiskID(disk, instance.primary_node)
1980 b352ab5b Iustin Pop
1981 a8083063 Iustin Pop
  return disks_ok, device_info
1982 a8083063 Iustin Pop
1983 a8083063 Iustin Pop
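# Illustrative sketch, not part of the original module: one way a caller
# could report the (disks_ok, device_info) pair returned by
# _AssembleInstanceDisks.  The function name and the feedback_fn callback
# are assumptions made only for this example.
def _ExampleReportAssembleResult(disks_ok, device_info, feedback_fn):
  """Pretty-print the result of _AssembleInstanceDisks (example only)."""
  if not disks_ok:
    feedback_fn("* warning: some block devices could not be assembled")
  for node, iv_name, phys_info in device_info:
    feedback_fn("  %s: %s is %s" % (node, iv_name, phys_info))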
1984 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
1985 3ecf6786 Iustin Pop
  """Start the disks of an instance.
1986 3ecf6786 Iustin Pop

1987 3ecf6786 Iustin Pop
  """
1988 fe7b0351 Michael Hanselmann
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
1989 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
1990 fe7b0351 Michael Hanselmann
  if not disks_ok:
1991 fe7b0351 Michael Hanselmann
    _ShutdownInstanceDisks(instance, cfg)
1992 fe7b0351 Michael Hanselmann
    if force is not None and not force:
1993 fe7b0351 Michael Hanselmann
      logger.Error("If the message above refers to a secondary node,"
1994 fe7b0351 Michael Hanselmann
                   " you can retry the operation using '--force'.")
1995 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
1996 fe7b0351 Michael Hanselmann
1997 fe7b0351 Michael Hanselmann
1998 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
1999 a8083063 Iustin Pop
  """Shutdown an instance's disks.
2000 a8083063 Iustin Pop

2001 a8083063 Iustin Pop
  """
2002 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2003 f22a8ba3 Guido Trotter
  REQ_BGL = False
2004 f22a8ba3 Guido Trotter
2005 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2006 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2007 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2008 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2009 f22a8ba3 Guido Trotter
2010 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2011 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2012 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2013 a8083063 Iustin Pop
2014 a8083063 Iustin Pop
  def CheckPrereq(self):
2015 a8083063 Iustin Pop
    """Check prerequisites.
2016 a8083063 Iustin Pop

2017 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2018 a8083063 Iustin Pop

2019 a8083063 Iustin Pop
    """
2020 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2021 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2022 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2023 a8083063 Iustin Pop
2024 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2025 a8083063 Iustin Pop
    """Deactivate the disks
2026 a8083063 Iustin Pop

2027 a8083063 Iustin Pop
    """
2028 a8083063 Iustin Pop
    instance = self.instance
2029 155d6c75 Guido Trotter
    _SafeShutdownInstanceDisks(instance, self.cfg)
2030 a8083063 Iustin Pop
2031 a8083063 Iustin Pop
2032 155d6c75 Guido Trotter
def _SafeShutdownInstanceDisks(instance, cfg):
2033 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
2034 155d6c75 Guido Trotter

2035 155d6c75 Guido Trotter
  This function checks if an instance is running, before calling
2036 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
2037 155d6c75 Guido Trotter

2038 155d6c75 Guido Trotter
  """
2039 155d6c75 Guido Trotter
  ins_l = rpc.call_instance_list([instance.primary_node])
2040 155d6c75 Guido Trotter
  ins_l = ins_l[instance.primary_node]
2041 155d6c75 Guido Trotter
  if not isinstance(ins_l, list):
2042 155d6c75 Guido Trotter
    raise errors.OpExecError("Can't contact node '%s'" %
2043 155d6c75 Guido Trotter
                             instance.primary_node)
2044 155d6c75 Guido Trotter
2045 155d6c75 Guido Trotter
  if instance.name in ins_l:
2046 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
2047 155d6c75 Guido Trotter
                             " block devices.")
2048 155d6c75 Guido Trotter
2049 155d6c75 Guido Trotter
  _ShutdownInstanceDisks(instance, cfg)
2050 a8083063 Iustin Pop
2051 a8083063 Iustin Pop
2052 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
2053 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2054 a8083063 Iustin Pop

2055 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2056 a8083063 Iustin Pop

2057 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
2058 a8083063 Iustin Pop
  ignored.
2059 a8083063 Iustin Pop

2060 a8083063 Iustin Pop
  """
2061 a8083063 Iustin Pop
  result = True
2062 a8083063 Iustin Pop
  for disk in instance.disks:
2063 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2064 a8083063 Iustin Pop
      cfg.SetDiskID(top_disk, node)
2065 a8083063 Iustin Pop
      if not rpc.call_blockdev_shutdown(node, top_disk):
2066 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
2067 a8083063 Iustin Pop
                     (disk.iv_name, node))
2068 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2069 a8083063 Iustin Pop
          result = False
2070 a8083063 Iustin Pop
  return result
2071 a8083063 Iustin Pop
2072 a8083063 Iustin Pop
2073 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
2074 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2075 d4f16fd9 Iustin Pop

2076 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
2077 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2078 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
2079 d4f16fd9 Iustin Pop
  exception.
2080 d4f16fd9 Iustin Pop

2081 d4f16fd9 Iustin Pop
  Args:
2082 d4f16fd9 Iustin Pop
    - cfg: a ConfigWriter instance
2083 d4f16fd9 Iustin Pop
    - node: the node name
2084 d4f16fd9 Iustin Pop
    - reason: string to use in the error message
2085 d4f16fd9 Iustin Pop
    - requested: the amount of memory in MiB
2086 d4f16fd9 Iustin Pop

2087 d4f16fd9 Iustin Pop
  """
2088 d4f16fd9 Iustin Pop
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
2089 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2090 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2091 d4f16fd9 Iustin Pop
                             " information" % (node,))
2092 d4f16fd9 Iustin Pop
2093 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2094 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2095 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2096 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2097 d4f16fd9 Iustin Pop
  if requested > free_mem:
2098 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2099 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2100 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2101 d4f16fd9 Iustin Pop
2102 d4f16fd9 Iustin Pop
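# Illustrative sketch, not part of the original module: the same free-memory
# check applied to several instances at once, against an already-fetched
# nodeinfo dict.  The function name is an assumption made for this example.
def _ExampleCheckNodeCanHold(nodeinfo, node, instances):
  """Check that 'node' can hold all of 'instances' (example only)."""
  free_mem = nodeinfo[node].get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  needed = sum([inst.memory for inst in instances])
  if needed > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s: needed %s MiB,"
                               " available %s MiB" % (node, needed, free_mem))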
2103 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2104 a8083063 Iustin Pop
  """Starts an instance.
2105 a8083063 Iustin Pop

2106 a8083063 Iustin Pop
  """
2107 a8083063 Iustin Pop
  HPATH = "instance-start"
2108 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2109 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2110 e873317a Guido Trotter
  REQ_BGL = False
2111 e873317a Guido Trotter
2112 e873317a Guido Trotter
  def ExpandNames(self):
2113 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2114 e873317a Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2115 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2116 e873317a Guido Trotter
2117 e873317a Guido Trotter
  def DeclareLocks(self, level):
2118 e873317a Guido Trotter
    if level == locking.LEVEL_NODE:
2119 e873317a Guido Trotter
      self._LockInstancesNodes()
2120 a8083063 Iustin Pop
2121 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2122 a8083063 Iustin Pop
    """Build hooks env.
2123 a8083063 Iustin Pop

2124 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2125 a8083063 Iustin Pop

2126 a8083063 Iustin Pop
    """
2127 a8083063 Iustin Pop
    env = {
2128 a8083063 Iustin Pop
      "FORCE": self.op.force,
2129 a8083063 Iustin Pop
      }
2130 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2131 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2132 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2133 a8083063 Iustin Pop
    return env, nl, nl
2134 a8083063 Iustin Pop
2135 a8083063 Iustin Pop
  def CheckPrereq(self):
2136 a8083063 Iustin Pop
    """Check prerequisites.
2137 a8083063 Iustin Pop

2138 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2139 a8083063 Iustin Pop

2140 a8083063 Iustin Pop
    """
2141 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2142 e873317a Guido Trotter
    assert self.instance is not None, \
2143 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2144 a8083063 Iustin Pop
2145 a8083063 Iustin Pop
    # check bridges existence
2146 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2147 a8083063 Iustin Pop
2148 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
2149 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
2150 d4f16fd9 Iustin Pop
                         instance.memory)
2151 d4f16fd9 Iustin Pop
2152 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2153 a8083063 Iustin Pop
    """Start the instance.
2154 a8083063 Iustin Pop

2155 a8083063 Iustin Pop
    """
2156 a8083063 Iustin Pop
    instance = self.instance
2157 a8083063 Iustin Pop
    force = self.op.force
2158 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
2159 a8083063 Iustin Pop
2160 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
2161 fe482621 Iustin Pop
2162 a8083063 Iustin Pop
    node_current = instance.primary_node
2163 a8083063 Iustin Pop
2164 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
2165 a8083063 Iustin Pop
2166 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
2167 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2168 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2169 a8083063 Iustin Pop
2170 a8083063 Iustin Pop
2171 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2172 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2173 bf6929a2 Alexander Schreiber

2174 bf6929a2 Alexander Schreiber
  """
2175 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2176 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2177 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2178 e873317a Guido Trotter
  REQ_BGL = False
2179 e873317a Guido Trotter
2180 e873317a Guido Trotter
  def ExpandNames(self):
2181 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2182 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2183 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
2184 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2185 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
2186 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2187 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
2188 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2189 e873317a Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2190 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2191 e873317a Guido Trotter
2192 e873317a Guido Trotter
  def DeclareLocks(self, level):
2193 e873317a Guido Trotter
    if level == locking.LEVEL_NODE:
2194 849da276 Guido Trotter
      primary_only = self.op.reboot_type != constants.INSTANCE_REBOOT_FULL
2195 849da276 Guido Trotter
      self._LockInstancesNodes(primary_only=primary_only)
2196 bf6929a2 Alexander Schreiber
2197 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2198 bf6929a2 Alexander Schreiber
    """Build hooks env.
2199 bf6929a2 Alexander Schreiber

2200 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2201 bf6929a2 Alexander Schreiber

2202 bf6929a2 Alexander Schreiber
    """
2203 bf6929a2 Alexander Schreiber
    env = {
2204 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2205 bf6929a2 Alexander Schreiber
      }
2206 bf6929a2 Alexander Schreiber
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2207 bf6929a2 Alexander Schreiber
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2208 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2209 bf6929a2 Alexander Schreiber
    return env, nl, nl
2210 bf6929a2 Alexander Schreiber
2211 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2212 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2213 bf6929a2 Alexander Schreiber

2214 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2215 bf6929a2 Alexander Schreiber

2216 bf6929a2 Alexander Schreiber
    """
2217 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2218 e873317a Guido Trotter
    assert self.instance is not None, \
2219 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2220 bf6929a2 Alexander Schreiber
2221 bf6929a2 Alexander Schreiber
    # check bridges existence
2222 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2223 bf6929a2 Alexander Schreiber
2224 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2225 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2226 bf6929a2 Alexander Schreiber

2227 bf6929a2 Alexander Schreiber
    """
2228 bf6929a2 Alexander Schreiber
    instance = self.instance
2229 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2230 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2231 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2232 bf6929a2 Alexander Schreiber
2233 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
2234 bf6929a2 Alexander Schreiber
2235 bf6929a2 Alexander Schreiber
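    # soft and hard reboots are delegated to the hypervisor on the primary
    # node; a full reboot is emulated by shutting the instance down,
    # re-assembling its disks and starting it again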
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2236 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2237 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_reboot(node_current, instance,
2238 bf6929a2 Alexander Schreiber
                                      reboot_type, extra_args):
2239 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2240 bf6929a2 Alexander Schreiber
    else:
2241 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_shutdown(node_current, instance):
2242 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2243 bf6929a2 Alexander Schreiber
      _ShutdownInstanceDisks(instance, self.cfg)
2244 bf6929a2 Alexander Schreiber
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
2245 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_start(node_current, instance, extra_args):
2246 bf6929a2 Alexander Schreiber
        _ShutdownInstanceDisks(instance, self.cfg)
2247 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2248 bf6929a2 Alexander Schreiber
2249 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
2250 bf6929a2 Alexander Schreiber
2251 bf6929a2 Alexander Schreiber
2252 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2253 a8083063 Iustin Pop
  """Shutdown an instance.
2254 a8083063 Iustin Pop

2255 a8083063 Iustin Pop
  """
2256 a8083063 Iustin Pop
  HPATH = "instance-stop"
2257 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2258 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2259 e873317a Guido Trotter
  REQ_BGL = False
2260 e873317a Guido Trotter
2261 e873317a Guido Trotter
  def ExpandNames(self):
2262 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2263 e873317a Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2264 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2265 e873317a Guido Trotter
2266 e873317a Guido Trotter
  def DeclareLocks(self, level):
2267 e873317a Guido Trotter
    if level == locking.LEVEL_NODE:
2268 e873317a Guido Trotter
      self._LockInstancesNodes()
2269 a8083063 Iustin Pop
2270 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2271 a8083063 Iustin Pop
    """Build hooks env.
2272 a8083063 Iustin Pop

2273 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2274 a8083063 Iustin Pop

2275 a8083063 Iustin Pop
    """
2276 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2277 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2278 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2279 a8083063 Iustin Pop
    return env, nl, nl
2280 a8083063 Iustin Pop
2281 a8083063 Iustin Pop
  def CheckPrereq(self):
2282 a8083063 Iustin Pop
    """Check prerequisites.
2283 a8083063 Iustin Pop

2284 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2285 a8083063 Iustin Pop

2286 a8083063 Iustin Pop
    """
2287 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2288 e873317a Guido Trotter
    assert self.instance is not None, \
2289 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2290 a8083063 Iustin Pop
2291 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2292 a8083063 Iustin Pop
    """Shutdown the instance.
2293 a8083063 Iustin Pop

2294 a8083063 Iustin Pop
    """
2295 a8083063 Iustin Pop
    instance = self.instance
2296 a8083063 Iustin Pop
    node_current = instance.primary_node
2297 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2298 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(node_current, instance):
2299 a8083063 Iustin Pop
      logger.Error("could not shutdown instance")
2300 a8083063 Iustin Pop
2301 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2302 a8083063 Iustin Pop
2303 a8083063 Iustin Pop
2304 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2305 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2306 fe7b0351 Michael Hanselmann

2307 fe7b0351 Michael Hanselmann
  """
2308 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2309 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2310 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2311 4e0b4d2d Guido Trotter
  REQ_BGL = False
2312 4e0b4d2d Guido Trotter
2313 4e0b4d2d Guido Trotter
  def ExpandNames(self):
2314 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
2315 4e0b4d2d Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2316 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2317 4e0b4d2d Guido Trotter
2318 4e0b4d2d Guido Trotter
  def DeclareLocks(self, level):
2319 4e0b4d2d Guido Trotter
    if level == locking.LEVEL_NODE:
2320 4e0b4d2d Guido Trotter
      self._LockInstancesNodes()
2321 fe7b0351 Michael Hanselmann
2322 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2323 fe7b0351 Michael Hanselmann
    """Build hooks env.
2324 fe7b0351 Michael Hanselmann

2325 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2326 fe7b0351 Michael Hanselmann

2327 fe7b0351 Michael Hanselmann
    """
2328 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2329 fe7b0351 Michael Hanselmann
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2330 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2331 fe7b0351 Michael Hanselmann
    return env, nl, nl
2332 fe7b0351 Michael Hanselmann
2333 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2334 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2335 fe7b0351 Michael Hanselmann

2336 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2337 fe7b0351 Michael Hanselmann

2338 fe7b0351 Michael Hanselmann
    """
2339 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2340 4e0b4d2d Guido Trotter
    assert instance is not None, \
2341 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2342 4e0b4d2d Guido Trotter
2343 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2344 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2345 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2346 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2347 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2348 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2349 fe7b0351 Michael Hanselmann
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2350 fe7b0351 Michael Hanselmann
    if remote_info:
2351 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2352 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2353 3ecf6786 Iustin Pop
                                  instance.primary_node))
2354 d0834de3 Michael Hanselmann
2355 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2356 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2357 d0834de3 Michael Hanselmann
      # OS verification
2358 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2359 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2360 d0834de3 Michael Hanselmann
      if pnode is None:
2361 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2362 3ecf6786 Iustin Pop
                                   self.op.pnode)
2363 00fe9e38 Guido Trotter
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
2364 dfa96ded Guido Trotter
      if not os_obj:
2365 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2366 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2367 d0834de3 Michael Hanselmann
2368 fe7b0351 Michael Hanselmann
    self.instance = instance
2369 fe7b0351 Michael Hanselmann
2370 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2371 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2372 fe7b0351 Michael Hanselmann

2373 fe7b0351 Michael Hanselmann
    """
2374 fe7b0351 Michael Hanselmann
    inst = self.instance
2375 fe7b0351 Michael Hanselmann
2376 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2377 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2378 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2379 97abc79f Iustin Pop
      self.cfg.Update(inst)
2380 d0834de3 Michael Hanselmann
2381 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, inst, None)
2382 fe7b0351 Michael Hanselmann
    try:
2383 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2384 fe7b0351 Michael Hanselmann
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2385 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Could not install OS for instance %s"
2386 f4bc1f2c Michael Hanselmann
                                 " on node %s" %
2387 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2388 fe7b0351 Michael Hanselmann
    finally:
2389 fe7b0351 Michael Hanselmann
      _ShutdownInstanceDisks(inst, self.cfg)
2390 fe7b0351 Michael Hanselmann
2391 fe7b0351 Michael Hanselmann
2392 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2393 decd5f45 Iustin Pop
  """Rename an instance.
2394 decd5f45 Iustin Pop

2395 decd5f45 Iustin Pop
  """
2396 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2397 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2398 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2399 decd5f45 Iustin Pop
2400 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2401 decd5f45 Iustin Pop
    """Build hooks env.
2402 decd5f45 Iustin Pop

2403 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2404 decd5f45 Iustin Pop

2405 decd5f45 Iustin Pop
    """
2406 decd5f45 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self.instance)
2407 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2408 decd5f45 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2409 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2410 decd5f45 Iustin Pop
    return env, nl, nl
2411 decd5f45 Iustin Pop
2412 decd5f45 Iustin Pop
  def CheckPrereq(self):
2413 decd5f45 Iustin Pop
    """Check prerequisites.
2414 decd5f45 Iustin Pop

2415 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2416 decd5f45 Iustin Pop

2417 decd5f45 Iustin Pop
    """
2418 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2419 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2420 decd5f45 Iustin Pop
    if instance is None:
2421 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2422 decd5f45 Iustin Pop
                                 self.op.instance_name)
2423 decd5f45 Iustin Pop
    if instance.status != "down":
2424 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2425 decd5f45 Iustin Pop
                                 self.op.instance_name)
2426 decd5f45 Iustin Pop
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2427 decd5f45 Iustin Pop
    if remote_info:
2428 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2429 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2430 decd5f45 Iustin Pop
                                  instance.primary_node))
2431 decd5f45 Iustin Pop
    self.instance = instance
2432 decd5f45 Iustin Pop
2433 decd5f45 Iustin Pop
    # new name verification
2434 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2435 decd5f45 Iustin Pop
2436 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2437 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
2438 7bde3275 Guido Trotter
    if new_name in instance_list:
2439 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2440 c09f363f Manuel Franceschini
                                 new_name)
2441 7bde3275 Guido Trotter
2442 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2443 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
2444 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2445 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2446 decd5f45 Iustin Pop
2447 decd5f45 Iustin Pop
2448 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2449 decd5f45 Iustin Pop
    """Reinstall the instance.
2450 decd5f45 Iustin Pop

2451 decd5f45 Iustin Pop
    """
2452 decd5f45 Iustin Pop
    inst = self.instance
2453 decd5f45 Iustin Pop
    old_name = inst.name
2454 decd5f45 Iustin Pop
2455 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2456 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2457 b23c4333 Manuel Franceschini
2458 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2459 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
2460 74b5913f Guido Trotter
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
2461 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
2462 decd5f45 Iustin Pop
2463 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2464 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2465 decd5f45 Iustin Pop
2466 b23c4333 Manuel Franceschini
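    # file-based disks live in a directory named after the instance, so the
    # backing directory has to be renamed on the primary node as well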
    if inst.disk_template == constants.DT_FILE:
2467 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2468 b23c4333 Manuel Franceschini
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
2469 b23c4333 Manuel Franceschini
                                                old_file_storage_dir,
2470 b23c4333 Manuel Franceschini
                                                new_file_storage_dir)
2471 b23c4333 Manuel Franceschini
2472 b23c4333 Manuel Franceschini
      if not result:
2473 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2474 b23c4333 Manuel Franceschini
                                 " directory '%s' to '%s' (but the instance"
2475 b23c4333 Manuel Franceschini
                                 " has been renamed in Ganeti)" % (
2476 b23c4333 Manuel Franceschini
                                 inst.primary_node, old_file_storage_dir,
2477 b23c4333 Manuel Franceschini
                                 new_file_storage_dir))
2478 b23c4333 Manuel Franceschini
2479 b23c4333 Manuel Franceschini
      if not result[0]:
2480 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2481 b23c4333 Manuel Franceschini
                                 " (but the instance has been renamed in"
2482 b23c4333 Manuel Franceschini
                                 " Ganeti)" % (old_file_storage_dir,
2483 b23c4333 Manuel Franceschini
                                               new_file_storage_dir))
2484 b23c4333 Manuel Franceschini
2485 decd5f45 Iustin Pop
    _StartInstanceDisks(self.cfg, inst, None)
2486 decd5f45 Iustin Pop
    try:
2487 decd5f45 Iustin Pop
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2488 decd5f45 Iustin Pop
                                          "sda", "sdb"):
2489 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
2490 6291574d Alexander Schreiber
               " (but the instance has been renamed in Ganeti)" %
2491 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2492 decd5f45 Iustin Pop
        logger.Error(msg)
2493 decd5f45 Iustin Pop
    finally:
2494 decd5f45 Iustin Pop
      _ShutdownInstanceDisks(inst, self.cfg)
2495 decd5f45 Iustin Pop
2496 decd5f45 Iustin Pop
2497 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2498 a8083063 Iustin Pop
  """Remove an instance.
2499 a8083063 Iustin Pop

2500 a8083063 Iustin Pop
  """
2501 a8083063 Iustin Pop
  HPATH = "instance-remove"
2502 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2503 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
2504 cf472233 Guido Trotter
  REQ_BGL = False
2505 cf472233 Guido Trotter
2506 cf472233 Guido Trotter
  def ExpandNames(self):
2507 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
2508 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2509 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2510 cf472233 Guido Trotter
2511 cf472233 Guido Trotter
  def DeclareLocks(self, level):
2512 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
2513 cf472233 Guido Trotter
      self._LockInstancesNodes()
2514 a8083063 Iustin Pop
2515 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2516 a8083063 Iustin Pop
    """Build hooks env.
2517 a8083063 Iustin Pop

2518 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2519 a8083063 Iustin Pop

2520 a8083063 Iustin Pop
    """
2521 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2522 1d67656e Iustin Pop
    nl = [self.sstore.GetMasterNode()]
2523 a8083063 Iustin Pop
    return env, nl, nl
2524 a8083063 Iustin Pop
2525 a8083063 Iustin Pop
  def CheckPrereq(self):
2526 a8083063 Iustin Pop
    """Check prerequisites.
2527 a8083063 Iustin Pop

2528 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2529 a8083063 Iustin Pop

2530 a8083063 Iustin Pop
    """
2531 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2532 cf472233 Guido Trotter
    assert self.instance is not None, \
2533 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2534 a8083063 Iustin Pop
2535 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2536 a8083063 Iustin Pop
    """Remove the instance.
2537 a8083063 Iustin Pop

2538 a8083063 Iustin Pop
    """
2539 a8083063 Iustin Pop
    instance = self.instance
2540 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
2541 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
2542 a8083063 Iustin Pop
2543 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2544 1d67656e Iustin Pop
      if self.op.ignore_failures:
2545 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2546 1d67656e Iustin Pop
      else:
2547 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2548 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2549 a8083063 Iustin Pop
2550 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
2551 a8083063 Iustin Pop
2552 1d67656e Iustin Pop
    if not _RemoveDisks(instance, self.cfg):
2553 1d67656e Iustin Pop
      if self.op.ignore_failures:
2554 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2555 1d67656e Iustin Pop
      else:
2556 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2557 a8083063 Iustin Pop
2558 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
2559 a8083063 Iustin Pop
2560 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2561 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2562 a8083063 Iustin Pop
2563 a8083063 Iustin Pop
2564 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2565 a8083063 Iustin Pop
  """Logical unit for querying instances.
2566 a8083063 Iustin Pop

2567 a8083063 Iustin Pop
  """
2568 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2569 7eb9d8f7 Guido Trotter
  REQ_BGL = False
2570 a8083063 Iustin Pop
2571 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
2572 d8052456 Iustin Pop
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
2573 57a2fb91 Iustin Pop
    self.static_fields = frozenset([
2574 57a2fb91 Iustin Pop
      "name", "os", "pnode", "snodes",
2575 57a2fb91 Iustin Pop
      "admin_state", "admin_ram",
2576 57a2fb91 Iustin Pop
      "disk_template", "ip", "mac", "bridge",
2577 57a2fb91 Iustin Pop
      "sda_size", "sdb_size", "vcpus", "tags",
2578 57a2fb91 Iustin Pop
      "network_port", "kernel_path", "initrd_path",
2579 57a2fb91 Iustin Pop
      "hvm_boot_order", "hvm_acpi", "hvm_pae",
2580 57a2fb91 Iustin Pop
      "hvm_cdrom_image_path", "hvm_nic_type",
2581 57a2fb91 Iustin Pop
      "hvm_disk_type", "vnc_bind_address",
2582 38d7239a Iustin Pop
      "serial_no",
2583 57a2fb91 Iustin Pop
      ])
2584 57a2fb91 Iustin Pop
    _CheckOutputFields(static=self.static_fields,
2585 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
2586 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2587 a8083063 Iustin Pop
2588 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
2589 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
2590 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2591 7eb9d8f7 Guido Trotter
2592 57a2fb91 Iustin Pop
    if self.op.names:
2593 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
2594 7eb9d8f7 Guido Trotter
    else:
2595 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
2596 7eb9d8f7 Guido Trotter
2597 57a2fb91 Iustin Pop
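    # dynamic fields require live data from the nodes, so instance/node
    # locks are only acquired when a non-static field was requested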
    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
2598 57a2fb91 Iustin Pop
    if self.do_locking:
2599 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
2600 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
2601 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2602 7eb9d8f7 Guido Trotter
2603 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
2604 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
2605 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
2606 7eb9d8f7 Guido Trotter
2607 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
2608 7eb9d8f7 Guido Trotter
    """Check prerequisites.
2609 7eb9d8f7 Guido Trotter

2610 7eb9d8f7 Guido Trotter
    """
2611 57a2fb91 Iustin Pop
    pass
2612 069dcc86 Iustin Pop
2613 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2614 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2615 a8083063 Iustin Pop

2616 a8083063 Iustin Pop
    """
2617 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
2618 57a2fb91 Iustin Pop
    if self.do_locking:
2619 57a2fb91 Iustin Pop
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2620 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2621 3fa93523 Guido Trotter
      instance_names = self.wanted
2622 3fa93523 Guido Trotter
      missing = set(instance_names).difference(all_info.keys())
2623 3fa93523 Guido Trotter
      if missing:
2624 3fa93523 Guido Trotter
        raise errors.OpExecError(
2625 3fa93523 Guido Trotter
          "Some instances were removed before retrieving their data: %s"
2626 3fa93523 Guido Trotter
          % missing)
2627 57a2fb91 Iustin Pop
    else:
2628 57a2fb91 Iustin Pop
      instance_names = all_info.keys()
2629 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
2630 a8083063 Iustin Pop
2631 a8083063 Iustin Pop
    # begin data gathering
2632 a8083063 Iustin Pop
2633 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2634 a8083063 Iustin Pop
2635 a8083063 Iustin Pop
    bad_nodes = []
2636 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
2637 a8083063 Iustin Pop
      live_data = {}
2638 a8083063 Iustin Pop
      node_data = rpc.call_all_instances_info(nodes)
2639 a8083063 Iustin Pop
      for name in nodes:
2640 a8083063 Iustin Pop
        result = node_data[name]
2641 a8083063 Iustin Pop
        if result:
2642 a8083063 Iustin Pop
          live_data.update(result)
2643 a8083063 Iustin Pop
        elif result == False:
2644 a8083063 Iustin Pop
          bad_nodes.append(name)
2645 a8083063 Iustin Pop
        # else no instance is alive
2646 a8083063 Iustin Pop
    else:
2647 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2648 a8083063 Iustin Pop
2649 a8083063 Iustin Pop
    # end data gathering
2650 a8083063 Iustin Pop
2651 a8083063 Iustin Pop
    output = []
2652 a8083063 Iustin Pop
    for instance in instance_list:
2653 a8083063 Iustin Pop
      iout = []
2654 a8083063 Iustin Pop
      for field in self.op.output_fields:
2655 a8083063 Iustin Pop
        if field == "name":
2656 a8083063 Iustin Pop
          val = instance.name
2657 a8083063 Iustin Pop
        elif field == "os":
2658 a8083063 Iustin Pop
          val = instance.os
2659 a8083063 Iustin Pop
        elif field == "pnode":
2660 a8083063 Iustin Pop
          val = instance.primary_node
2661 a8083063 Iustin Pop
        elif field == "snodes":
2662 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2663 a8083063 Iustin Pop
        elif field == "admin_state":
2664 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2665 a8083063 Iustin Pop
        elif field == "oper_state":
2666 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2667 8a23d2d3 Iustin Pop
            val = None
2668 a8083063 Iustin Pop
          else:
2669 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2670 d8052456 Iustin Pop
        elif field == "status":
2671 d8052456 Iustin Pop
          if instance.primary_node in bad_nodes:
2672 d8052456 Iustin Pop
            val = "ERROR_nodedown"
2673 d8052456 Iustin Pop
          else:
2674 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
2675 d8052456 Iustin Pop
            if running:
2676 d8052456 Iustin Pop
              if instance.status != "down":
2677 d8052456 Iustin Pop
                val = "running"
2678 d8052456 Iustin Pop
              else:
2679 d8052456 Iustin Pop
                val = "ERROR_up"
2680 d8052456 Iustin Pop
            else:
2681 d8052456 Iustin Pop
              if instance.status != "down":
2682 d8052456 Iustin Pop
                val = "ERROR_down"
2683 d8052456 Iustin Pop
              else:
2684 d8052456 Iustin Pop
                val = "ADMIN_down"
2685 a8083063 Iustin Pop
        elif field == "admin_ram":
2686 a8083063 Iustin Pop
          val = instance.memory
2687 a8083063 Iustin Pop
        elif field == "oper_ram":
2688 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2689 8a23d2d3 Iustin Pop
            val = None
2690 a8083063 Iustin Pop
          elif instance.name in live_data:
2691 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2692 a8083063 Iustin Pop
          else:
2693 a8083063 Iustin Pop
            val = "-"
2694 a8083063 Iustin Pop
        elif field == "disk_template":
2695 a8083063 Iustin Pop
          val = instance.disk_template
2696 a8083063 Iustin Pop
        elif field == "ip":
2697 a8083063 Iustin Pop
          val = instance.nics[0].ip
2698 a8083063 Iustin Pop
        elif field == "bridge":
2699 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2700 a8083063 Iustin Pop
        elif field == "mac":
2701 a8083063 Iustin Pop
          val = instance.nics[0].mac
2702 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2703 644eeef9 Iustin Pop
          disk = instance.FindDisk(field[:3])
2704 644eeef9 Iustin Pop
          if disk is None:
2705 8a23d2d3 Iustin Pop
            val = None
2706 644eeef9 Iustin Pop
          else:
2707 644eeef9 Iustin Pop
            val = disk.size
2708 d6d415e8 Iustin Pop
        elif field == "vcpus":
2709 d6d415e8 Iustin Pop
          val = instance.vcpus
2710 130a6a6f Iustin Pop
        elif field == "tags":
2711 130a6a6f Iustin Pop
          val = list(instance.GetTags())
2712 38d7239a Iustin Pop
        elif field == "serial_no":
2713 38d7239a Iustin Pop
          val = instance.serial_no
2714 3fb1e1c5 Alexander Schreiber
        elif field in ("network_port", "kernel_path", "initrd_path",
2715 3fb1e1c5 Alexander Schreiber
                       "hvm_boot_order", "hvm_acpi", "hvm_pae",
2716 3fb1e1c5 Alexander Schreiber
                       "hvm_cdrom_image_path", "hvm_nic_type",
2717 3fb1e1c5 Alexander Schreiber
                       "hvm_disk_type", "vnc_bind_address"):
2718 3fb1e1c5 Alexander Schreiber
          val = getattr(instance, field, None)
2719 3fb1e1c5 Alexander Schreiber
          if val is not None:
2720 3fb1e1c5 Alexander Schreiber
            pass
2721 3fb1e1c5 Alexander Schreiber
          elif field in ("hvm_nic_type", "hvm_disk_type",
2722 3fb1e1c5 Alexander Schreiber
                         "kernel_path", "initrd_path"):
2723 3fb1e1c5 Alexander Schreiber
            val = "default"
2724 3fb1e1c5 Alexander Schreiber
          else:
2725 3fb1e1c5 Alexander Schreiber
            val = "-"
2726 a8083063 Iustin Pop
        else:
2727 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2728 a8083063 Iustin Pop
        iout.append(val)
2729 a8083063 Iustin Pop
      output.append(iout)
2730 a8083063 Iustin Pop
2731 a8083063 Iustin Pop
    return output
2732 a8083063 Iustin Pop
2733 a8083063 Iustin Pop
2734 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2735 a8083063 Iustin Pop
  """Failover an instance.
2736 a8083063 Iustin Pop

2737 a8083063 Iustin Pop
  """
2738 a8083063 Iustin Pop
  HPATH = "instance-failover"
2739 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2740 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2741 c9e5c064 Guido Trotter
  REQ_BGL = False
2742 c9e5c064 Guido Trotter
2743 c9e5c064 Guido Trotter
  def ExpandNames(self):
2744 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
2745 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2746 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2747 c9e5c064 Guido Trotter
2748 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
2749 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
2750 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
2751 a8083063 Iustin Pop
2752 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2753 a8083063 Iustin Pop
    """Build hooks env.
2754 a8083063 Iustin Pop

2755 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2756 a8083063 Iustin Pop

2757 a8083063 Iustin Pop
    """
2758 a8083063 Iustin Pop
    env = {
2759 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2760 a8083063 Iustin Pop
      }
2761 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2762 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2763 a8083063 Iustin Pop
    return env, nl, nl
2764 a8083063 Iustin Pop
2765 a8083063 Iustin Pop
  def CheckPrereq(self):
2766 a8083063 Iustin Pop
    """Check prerequisites.
2767 a8083063 Iustin Pop

2768 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2769 a8083063 Iustin Pop

2770 a8083063 Iustin Pop
    """
2771 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2772 c9e5c064 Guido Trotter
    assert self.instance is not None, \
2773 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2774 a8083063 Iustin Pop
2775 a1f445d3 Iustin Pop
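    # only network-mirrored disk templates keep a full copy of the data on
    # the secondary node, so only those instances can be failed over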
    if instance.disk_template not in constants.DTS_NET_MIRROR:
2776 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2777 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
2778 2a710df1 Michael Hanselmann
2779 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2780 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2781 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2782 abdf0113 Iustin Pop
                                   "a mirrored disk template")
2783 2a710df1 Michael Hanselmann
2784 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2785 d4f16fd9 Iustin Pop
    # check memory requirements on the secondary node
2786 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
2787 d4f16fd9 Iustin Pop
                         instance.name, instance.memory)
2788 3a7c308e Guido Trotter
2789 a8083063 Iustin Pop
    # check bridge existence
2790 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2791 50ff9a7a Iustin Pop
    if not rpc.call_bridges_exist(target_node, brlist):
2792 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2793 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2794 50ff9a7a Iustin Pop
                                 (brlist, target_node))
2795 a8083063 Iustin Pop
2796 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2797 a8083063 Iustin Pop
    """Failover an instance.
2798 a8083063 Iustin Pop

2799 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2800 a8083063 Iustin Pop
    starting it on the secondary.
2801 a8083063 Iustin Pop

2802 a8083063 Iustin Pop
    """
2803 a8083063 Iustin Pop
    instance = self.instance
2804 a8083063 Iustin Pop
2805 a8083063 Iustin Pop
    source_node = instance.primary_node
2806 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2807 a8083063 Iustin Pop
2808 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2809 a8083063 Iustin Pop
    for dev in instance.disks:
2810 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
2811 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2812 a0aaa0d0 Guido Trotter
        if instance.status == "up" and not self.op.ignore_consistency:
2813 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
2814 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
2815 a8083063 Iustin Pop
2816 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2817 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2818 a8083063 Iustin Pop
                (instance.name, source_node))
2819 a8083063 Iustin Pop
2820 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2821 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
2822 24a40d57 Iustin Pop
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2823 24a40d57 Iustin Pop
                     " anyway. Please make sure node %s is down"  %
2824 24a40d57 Iustin Pop
                     (instance.name, source_node, source_node))
2825 24a40d57 Iustin Pop
      else:
2826 24a40d57 Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2827 24a40d57 Iustin Pop
                                 (instance.name, source_node))
2828 a8083063 Iustin Pop
2829 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2830 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2831 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
2832 a8083063 Iustin Pop
2833 a8083063 Iustin Pop
    instance.primary_node = target_node
2834 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2835 b6102dab Guido Trotter
    self.cfg.Update(instance)
2836 a8083063 Iustin Pop
2837 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
2838 12a0cfbe Guido Trotter
    if instance.status == "up":
2839 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
2840 12a0cfbe Guido Trotter
      logger.Info("Starting instance %s on node %s" %
2841 12a0cfbe Guido Trotter
                  (instance.name, target_node))
2842 12a0cfbe Guido Trotter
2843 12a0cfbe Guido Trotter
      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2844 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
2845 12a0cfbe Guido Trotter
      if not disks_ok:
2846 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2847 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
2848 a8083063 Iustin Pop
2849 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
2850 12a0cfbe Guido Trotter
      if not rpc.call_instance_start(target_node, instance, None):
2851 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2852 12a0cfbe Guido Trotter
        raise errors.OpExecError("Could not start instance %s on node %s." %
2853 12a0cfbe Guido Trotter
                                 (instance.name, target_node))
2854 a8083063 Iustin Pop
2855 a8083063 Iustin Pop
2856 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
2857 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2858 a8083063 Iustin Pop

2859 a8083063 Iustin Pop
  This always creates all devices.
2860 a8083063 Iustin Pop

2861 a8083063 Iustin Pop
  """
2862 a8083063 Iustin Pop
  if device.children:
2863 a8083063 Iustin Pop
    for child in device.children:
2864 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
2865 a8083063 Iustin Pop
        return False
2866 a8083063 Iustin Pop
2867 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2868 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2869 3f78eef2 Iustin Pop
                                    instance.name, True, info)
2870 a8083063 Iustin Pop
  if not new_id:
2871 a8083063 Iustin Pop
    return False
2872 a8083063 Iustin Pop
  if device.physical_id is None:
2873 a8083063 Iustin Pop
    device.physical_id = new_id
2874 a8083063 Iustin Pop
  return True
2875 a8083063 Iustin Pop
2876 a8083063 Iustin Pop
2877 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
2878 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2879 a8083063 Iustin Pop

2880 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2881 a8083063 Iustin Pop
  all its children.
2882 a8083063 Iustin Pop

2883 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2884 a8083063 Iustin Pop

2885 a8083063 Iustin Pop
  """
2886 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2887 a8083063 Iustin Pop
    force = True
2888 a8083063 Iustin Pop
  if device.children:
2889 a8083063 Iustin Pop
    for child in device.children:
2890 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, node, instance,
2891 3f78eef2 Iustin Pop
                                        child, force, info):
2892 a8083063 Iustin Pop
        return False
2893 a8083063 Iustin Pop
2894 a8083063 Iustin Pop
  if not force:
2895 a8083063 Iustin Pop
    return True
2896 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2897 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2898 3f78eef2 Iustin Pop
                                    instance.name, False, info)
2899 a8083063 Iustin Pop
  if not new_id:
2900 a8083063 Iustin Pop
    return False
2901 a8083063 Iustin Pop
  if device.physical_id is None:
2902 a8083063 Iustin Pop
    device.physical_id = new_id
2903 a8083063 Iustin Pop
  return True
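# A brief reading note, assuming the standard Disk.CreateOnSecondary()
# semantics: a device whose data must also exist on the secondary (for
# example the pieces of a DRBD8 mirror) switches 'force' on for itself and
# its whole subtree; any other device is merely recursed into and is only
# physically created if a parent already set 'force'.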
2904 a8083063 Iustin Pop
2905 a8083063 Iustin Pop
2906 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2907 923b1523 Iustin Pop
  """Generate a suitable LV name.
2908 923b1523 Iustin Pop

2909 923b1523 Iustin Pop
  This will generate one unique logical volume name for each extension given.
2910 923b1523 Iustin Pop

2911 923b1523 Iustin Pop
  """
2912 923b1523 Iustin Pop
  results = []
2913 923b1523 Iustin Pop
  for val in exts:
2914 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2915 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2916 923b1523 Iustin Pop
  return results
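# Illustrative sketch only (the IDs below are made-up placeholders): each
# extension receives its own freshly generated unique ID, e.g.
#   _GenerateUniqueNames(cfg, [".sda", ".sdb"])
#   -> ["0c5c83ae-1234.sda", "f81dd7a3-5678.sdb"]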
2917 923b1523 Iustin Pop
2918 923b1523 Iustin Pop
2919 ffa1c0dc Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name,
2920 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
2921 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
2922 a1f445d3 Iustin Pop

2923 a1f445d3 Iustin Pop
  """
2924 a1f445d3 Iustin Pop
  port = cfg.AllocatePort()
2925 a1f445d3 Iustin Pop
  vgname = cfg.GetVGName()
2926 f9518d38 Iustin Pop
  shared_secret = cfg.GenerateDRBDSecret()
2927 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
2928 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
2929 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
2930 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
2931 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
2932 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
2933 f9518d38 Iustin Pop
                                      p_minor, s_minor,
2934 f9518d38 Iustin Pop
                                      shared_secret),
2935 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
2936 a1f445d3 Iustin Pop
                          iv_name=iv_name)
2937 a1f445d3 Iustin Pop
  return drbd_dev
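# Sketch of the resulting device tree (sizes and names are whatever the
# caller passed in):
#
#   LD_DRBD8(size=size, iv_name=iv_name,
#            logical_id=(primary, secondary, port, p_minor, s_minor, secret))
#    +- LD_LV(size=size, logical_id=(vgname, names[0]))   # data volume
#    +- LD_LV(size=128,  logical_id=(vgname, names[1]))   # metadata volume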
2938 a1f445d3 Iustin Pop
2939 7c0d6283 Michael Hanselmann
2940 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
2941 a8083063 Iustin Pop
                          instance_name, primary_node,
2942 0f1a06e3 Manuel Franceschini
                          secondary_nodes, disk_sz, swap_sz,
2943 0f1a06e3 Manuel Franceschini
                          file_storage_dir, file_driver):
2944 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
2945 a8083063 Iustin Pop

2946 a8083063 Iustin Pop
  """
2947 a8083063 Iustin Pop
  #TODO: compute space requirements
2948 a8083063 Iustin Pop
2949 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2950 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
2951 a8083063 Iustin Pop
    disks = []
2952 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
2953 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2954 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2955 923b1523 Iustin Pop
2956 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
2957 fe96220b Iustin Pop
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
2958 923b1523 Iustin Pop
                           logical_id=(vgname, names[0]),
2959 a8083063 Iustin Pop
                           iv_name = "sda")
2960 fe96220b Iustin Pop
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
2961 923b1523 Iustin Pop
                           logical_id=(vgname, names[1]),
2962 a8083063 Iustin Pop
                           iv_name = "sdb")
2963 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
2964 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
2965 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
2966 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2967 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
2968 ffa1c0dc Iustin Pop
    (minor_pa, minor_pb,
2969 a1578d63 Iustin Pop
     minor_sa, minor_sb) = cfg.AllocateDRBDMinor(
2970 a1578d63 Iustin Pop
      [primary_node, primary_node, remote_node, remote_node], instance_name)
2971 ffa1c0dc Iustin Pop
2972 a1f445d3 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
2973 a1f445d3 Iustin Pop
                                       ".sdb_data", ".sdb_meta"])
2974 a1f445d3 Iustin Pop
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2975 ffa1c0dc Iustin Pop
                                        disk_sz, names[0:2], "sda",
2976 ffa1c0dc Iustin Pop
                                        minor_pa, minor_sa)
2977 a1f445d3 Iustin Pop
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2978 ffa1c0dc Iustin Pop
                                        swap_sz, names[2:4], "sdb",
2979 ffa1c0dc Iustin Pop
                                        minor_pb, minor_sb)
2980 a1f445d3 Iustin Pop
    disks = [drbd_sda_dev, drbd_sdb_dev]
2981 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
2982 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
2983 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
2984 0f1a06e3 Manuel Franceschini
2985 0f1a06e3 Manuel Franceschini
    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
2986 0f1a06e3 Manuel Franceschini
                                iv_name="sda", logical_id=(file_driver,
2987 0f1a06e3 Manuel Franceschini
                                "%s/sda" % file_storage_dir))
2988 0f1a06e3 Manuel Franceschini
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
2989 0f1a06e3 Manuel Franceschini
                                iv_name="sdb", logical_id=(file_driver,
2990 0f1a06e3 Manuel Franceschini
                                "%s/sdb" % file_storage_dir))
2991 0f1a06e3 Manuel Franceschini
    disks = [file_sda_dev, file_sdb_dev]
2992 a8083063 Iustin Pop
  else:
2993 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
2994 a8083063 Iustin Pop
  return disks
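# Summary of what each template yields (a reading aid for the code above):
#   DT_DISKLESS -> []
#   DT_PLAIN    -> [LV "sda" (disk_sz), LV "sdb" (swap_sz)]
#   DT_DRBD8    -> [DRBD8 "sda", DRBD8 "sdb"], each over a data LV plus a
#                  128 MB metadata LV, with minors allocated on both nodes
#   DT_FILE     -> [file "sda", file "sdb"] under file_storage_dir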
2995 a8083063 Iustin Pop
2996 a8083063 Iustin Pop
2997 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2998 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2999 3ecf6786 Iustin Pop

3000 3ecf6786 Iustin Pop
  """
3001 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3002 a0c3fea1 Michael Hanselmann
3003 a0c3fea1 Michael Hanselmann
3004 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
3005 a8083063 Iustin Pop
  """Create all disks for an instance.
3006 a8083063 Iustin Pop

3007 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
3008 a8083063 Iustin Pop

3009 a8083063 Iustin Pop
  Args:
3010 a8083063 Iustin Pop
    instance: the instance object
3011 a8083063 Iustin Pop

3012 a8083063 Iustin Pop
  Returns:
3013 a8083063 Iustin Pop
    True or False showing the success of the creation process
3014 a8083063 Iustin Pop

3015 a8083063 Iustin Pop
  """
3016 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
3017 a0c3fea1 Michael Hanselmann
3018 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3019 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3020 0f1a06e3 Manuel Franceschini
    result = rpc.call_file_storage_dir_create(instance.primary_node,
3021 0f1a06e3 Manuel Franceschini
                                              file_storage_dir)
3022 0f1a06e3 Manuel Franceschini
3023 0f1a06e3 Manuel Franceschini
    if not result:
3024 b62ddbe5 Guido Trotter
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
3025 0f1a06e3 Manuel Franceschini
      return False
3026 0f1a06e3 Manuel Franceschini
3027 0f1a06e3 Manuel Franceschini
    if not result[0]:
3028 0f1a06e3 Manuel Franceschini
      logger.Error("failed to create directory '%s'" % file_storage_dir)
3029 0f1a06e3 Manuel Franceschini
      return False
3030 0f1a06e3 Manuel Franceschini
3031 a8083063 Iustin Pop
  for device in instance.disks:
3032 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
3033 1c6e3627 Manuel Franceschini
                (device.iv_name, instance.name))
3034 a8083063 Iustin Pop
    #HARDCODE
3035 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
3036 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
3037 3f78eef2 Iustin Pop
                                        device, False, info):
3038 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
3039 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
3040 a8083063 Iustin Pop
        return False
3041 a8083063 Iustin Pop
    #HARDCODE
3042 3f78eef2 Iustin Pop
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
3043 3f78eef2 Iustin Pop
                                    instance, device, info):
3044 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
3045 a8083063 Iustin Pop
                   device.iv_name)
3046 a8083063 Iustin Pop
      return False
3047 1c6e3627 Manuel Franceschini
3048 a8083063 Iustin Pop
  return True
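# Note on the flow above: for file-based instances the storage directory is
# created first on the primary node; every disk is then built bottom-up, on
# each secondary node before the primary, and the function returns False as
# soon as any single creation step fails.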
3049 a8083063 Iustin Pop
3050 a8083063 Iustin Pop
3051 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
3052 a8083063 Iustin Pop
  """Remove all disks for an instance.
3053 a8083063 Iustin Pop

3054 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
3055 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
3056 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
3057 a8083063 Iustin Pop
  with `_CreateDisks()`).
3058 a8083063 Iustin Pop

3059 a8083063 Iustin Pop
  Args:
3060 a8083063 Iustin Pop
    instance: the instance object
3061 a8083063 Iustin Pop

3062 a8083063 Iustin Pop
  Returns:
3063 a8083063 Iustin Pop
    True or False showing the success of the removal proces
3064 a8083063 Iustin Pop

3065 a8083063 Iustin Pop
  """
3066 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
3067 a8083063 Iustin Pop
3068 a8083063 Iustin Pop
  result = True
3069 a8083063 Iustin Pop
  for device in instance.disks:
3070 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
3071 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
3072 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
3073 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
3074 a8083063 Iustin Pop
                     " continuing anyway" %
3075 a8083063 Iustin Pop
                     (device.iv_name, node))
3076 a8083063 Iustin Pop
        result = False
3077 0f1a06e3 Manuel Franceschini
3078 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3079 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3080 0f1a06e3 Manuel Franceschini
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
3081 0f1a06e3 Manuel Franceschini
                                            file_storage_dir):
3082 0f1a06e3 Manuel Franceschini
      logger.Error("could not remove directory '%s'" % file_storage_dir)
3083 0f1a06e3 Manuel Franceschini
      result = False
3084 0f1a06e3 Manuel Franceschini
3085 a8083063 Iustin Pop
  return result
3086 a8083063 Iustin Pop
3087 a8083063 Iustin Pop
3088 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
3089 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
3090 e2fe6369 Iustin Pop

3091 e2fe6369 Iustin Pop
  This is currently hard-coded for the two-drive layout.
3092 e2fe6369 Iustin Pop

3093 e2fe6369 Iustin Pop
  """
3094 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk and swap space
3095 e2fe6369 Iustin Pop
  req_size_dict = {
3096 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
3097 e2fe6369 Iustin Pop
    constants.DT_PLAIN: disk_size + swap_size,
3098 e2fe6369 Iustin Pop
    # 256 MB are added for drbd metadata, 128MB for each drbd device
3099 e2fe6369 Iustin Pop
    constants.DT_DRBD8: disk_size + swap_size + 256,
3100 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
3101 e2fe6369 Iustin Pop
  }
3102 e2fe6369 Iustin Pop
3103 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
3104 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
3105 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
3106 e2fe6369 Iustin Pop
3107 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
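# Worked example with arbitrary values, a 10240 MB disk plus 2048 MB swap:
#   DT_PLAIN: 10240 + 2048       = 12288 MB of free VG space required
#   DT_DRBD8: 10240 + 2048 + 256 = 12544 MB (DRBD metadata overhead included)
# DT_DISKLESS and DT_FILE need no volume group space at all (None).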
3108 e2fe6369 Iustin Pop
3109 e2fe6369 Iustin Pop
3110 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
3111 a8083063 Iustin Pop
  """Create an instance.
3112 a8083063 Iustin Pop

3113 a8083063 Iustin Pop
  """
3114 a8083063 Iustin Pop
  HPATH = "instance-add"
3115 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3116 538475ca Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
3117 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
3118 1862d460 Alexander Schreiber
              "wait_for_sync", "ip_check", "mac"]
3119 7baf741d Guido Trotter
  REQ_BGL = False
3120 7baf741d Guido Trotter
3121 7baf741d Guido Trotter
  def _ExpandNode(self, node):
3122 7baf741d Guido Trotter
    """Expands and checks one node name.
3123 7baf741d Guido Trotter

3124 7baf741d Guido Trotter
    """
3125 7baf741d Guido Trotter
    node_full = self.cfg.ExpandNodeName(node)
3126 7baf741d Guido Trotter
    if node_full is None:
3127 7baf741d Guido Trotter
      raise errors.OpPrereqError("Unknown node %s" % node)
3128 7baf741d Guido Trotter
    return node_full
3129 7baf741d Guido Trotter
3130 7baf741d Guido Trotter
  def ExpandNames(self):
3131 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
3132 7baf741d Guido Trotter

3133 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
3134 7baf741d Guido Trotter

3135 7baf741d Guido Trotter
    """
3136 7baf741d Guido Trotter
    self.needed_locks = {}
3137 7baf741d Guido Trotter
3138 7baf741d Guido Trotter
    # set optional parameters to none if they don't exist
3139 7baf741d Guido Trotter
    for attr in ["kernel_path", "initrd_path", "pnode", "snode",
3140 7baf741d Guido Trotter
                 "iallocator", "hvm_boot_order", "hvm_acpi", "hvm_pae",
3141 7baf741d Guido Trotter
                 "hvm_cdrom_image_path", "hvm_nic_type", "hvm_disk_type",
3142 7baf741d Guido Trotter
                 "vnc_bind_address"]:
3143 7baf741d Guido Trotter
      if not hasattr(self.op, attr):
3144 7baf741d Guido Trotter
        setattr(self.op, attr, None)
3145 7baf741d Guido Trotter
3146 7baf741d Guido Trotter
    # verify creation mode
3147 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
3148 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
3149 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3150 7baf741d Guido Trotter
                                 self.op.mode)
3151 7baf741d Guido Trotter
    # disk template and mirror node verification
3152 7baf741d Guido Trotter
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3153 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid disk template name")
3154 7baf741d Guido Trotter
3155 7baf741d Guido Trotter
    #### instance parameters check
3156 7baf741d Guido Trotter
3157 7baf741d Guido Trotter
    # instance name verification
3158 7baf741d Guido Trotter
    hostname1 = utils.HostInfo(self.op.instance_name)
3159 7baf741d Guido Trotter
    self.op.instance_name = instance_name = hostname1.name
3160 7baf741d Guido Trotter
3161 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
3162 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
3163 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
3164 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3165 7baf741d Guido Trotter
                                 instance_name)
3166 7baf741d Guido Trotter
3167 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
3168 7baf741d Guido Trotter
3169 7baf741d Guido Trotter
    # ip validity checks
3170 7baf741d Guido Trotter
    ip = getattr(self.op, "ip", None)
3171 7baf741d Guido Trotter
    if ip is None or ip.lower() == "none":
3172 7baf741d Guido Trotter
      inst_ip = None
3173 7baf741d Guido Trotter
    elif ip.lower() == "auto":
3174 7baf741d Guido Trotter
      inst_ip = hostname1.ip
3175 7baf741d Guido Trotter
    else:
3176 7baf741d Guido Trotter
      if not utils.IsValidIP(ip):
3177 7baf741d Guido Trotter
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3178 7baf741d Guido Trotter
                                   " like a valid IP" % ip)
3179 7baf741d Guido Trotter
      inst_ip = ip
3180 7baf741d Guido Trotter
    self.inst_ip = self.op.ip = inst_ip
3181 7baf741d Guido Trotter
    # used in CheckPrereq for ip ping check
3182 7baf741d Guido Trotter
    self.check_ip = hostname1.ip
3183 7baf741d Guido Trotter
3184 7baf741d Guido Trotter
    # MAC address verification
3185 7baf741d Guido Trotter
    if self.op.mac != "auto":
3186 7baf741d Guido Trotter
      if not utils.IsValidMac(self.op.mac.lower()):
3187 7baf741d Guido Trotter
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3188 7baf741d Guido Trotter
                                   self.op.mac)
3189 7baf741d Guido Trotter
3190 7baf741d Guido Trotter
    # boot order verification
3191 7baf741d Guido Trotter
    if self.op.hvm_boot_order is not None:
3192 7baf741d Guido Trotter
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3193 7baf741d Guido Trotter
        raise errors.OpPrereqError("invalid boot order specified,"
3194 7baf741d Guido Trotter
                                   " must be one or more of [acdn]")
3195 7baf741d Guido Trotter
    # file storage checks
3196 7baf741d Guido Trotter
    if (self.op.file_driver and
3197 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
3198 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3199 7baf741d Guido Trotter
                                 self.op.file_driver)
3200 7baf741d Guido Trotter
3201 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3202 7baf741d Guido Trotter
      raise errors.OpPrereqError("File storage directory path not absolute")
3203 7baf741d Guido Trotter
3204 7baf741d Guido Trotter
    ### Node/iallocator related checks
3205 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3206 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3207 7baf741d Guido Trotter
                                 " node must be given")
3208 7baf741d Guido Trotter
3209 7baf741d Guido Trotter
    if self.op.iallocator:
3210 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3211 7baf741d Guido Trotter
    else:
3212 7baf741d Guido Trotter
      self.op.pnode = self._ExpandNode(self.op.pnode)
3213 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
3214 7baf741d Guido Trotter
      if self.op.snode is not None:
3215 7baf741d Guido Trotter
        self.op.snode = self._ExpandNode(self.op.snode)
3216 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
3217 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
3218 7baf741d Guido Trotter
3219 7baf741d Guido Trotter
    # in case of import lock the source node too
3220 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
3221 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
3222 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
3223 7baf741d Guido Trotter
3224 7baf741d Guido Trotter
      if src_node is None or src_path is None:
3225 7baf741d Guido Trotter
        raise errors.OpPrereqError("Importing an instance requires source"
3226 7baf741d Guido Trotter
                                   " node and path options")
3227 7baf741d Guido Trotter
3228 7baf741d Guido Trotter
      if not os.path.isabs(src_path):
3229 7baf741d Guido Trotter
        raise errors.OpPrereqError("The source path must be absolute")
3230 7baf741d Guido Trotter
3231 7baf741d Guido Trotter
      self.op.src_node = src_node = self._ExpandNode(src_node)
3232 7baf741d Guido Trotter
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
3233 7baf741d Guido Trotter
        self.needed_locks[locking.LEVEL_NODE].append(src_node)
3234 7baf741d Guido Trotter
3235 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
3236 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
3237 7baf741d Guido Trotter
        raise errors.OpPrereqError("No guest OS specified")
3238 a8083063 Iustin Pop
3239 538475ca Iustin Pop
  def _RunAllocator(self):
3240 538475ca Iustin Pop
    """Run the allocator based on input opcode.
3241 538475ca Iustin Pop

3242 538475ca Iustin Pop
    """
3243 538475ca Iustin Pop
    disks = [{"size": self.op.disk_size, "mode": "w"},
3244 538475ca Iustin Pop
             {"size": self.op.swap_size, "mode": "w"}]
3245 538475ca Iustin Pop
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
3246 538475ca Iustin Pop
             "bridge": self.op.bridge}]
3247 d1c2dd75 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3248 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
3249 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
3250 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
3251 d1c2dd75 Iustin Pop
                     tags=[],
3252 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
3253 d1c2dd75 Iustin Pop
                     vcpus=self.op.vcpus,
3254 d1c2dd75 Iustin Pop
                     mem_size=self.op.mem_size,
3255 d1c2dd75 Iustin Pop
                     disks=disks,
3256 d1c2dd75 Iustin Pop
                     nics=nics,
3257 29859cb7 Iustin Pop
                     )
3258 d1c2dd75 Iustin Pop
3259 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
3260 d1c2dd75 Iustin Pop
3261 d1c2dd75 Iustin Pop
    if not ial.success:
3262 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3263 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3264 d1c2dd75 Iustin Pop
                                                           ial.info))
3265 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3266 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3267 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
3268 97abc79f Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
3269 1ce4bbe3 Renรฉ Nussbaumer
                                  ial.required_nodes))
3270 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
3271 538475ca Iustin Pop
    logger.ToStdout("Selected nodes for the instance: %s" %
3272 d1c2dd75 Iustin Pop
                    (", ".join(ial.nodes),))
3273 538475ca Iustin Pop
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
3274 d1c2dd75 Iustin Pop
                (self.op.instance_name, self.op.iallocator, ial.nodes))
3275 27579978 Iustin Pop
    if ial.required_nodes == 2:
3276 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
3277 538475ca Iustin Pop
3278 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3279 a8083063 Iustin Pop
    """Build hooks env.
3280 a8083063 Iustin Pop

3281 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3282 a8083063 Iustin Pop

3283 a8083063 Iustin Pop
    """
3284 a8083063 Iustin Pop
    env = {
3285 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3286 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
3287 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
3288 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
3289 a8083063 Iustin Pop
      }
3290 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3291 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
3292 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
3293 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
3294 396e1b78 Michael Hanselmann
3295 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3296 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
3297 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
3298 396e1b78 Michael Hanselmann
      status=self.instance_status,
3299 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
3300 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
3301 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
3302 c7b27e9e Iustin Pop
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
3303 396e1b78 Michael Hanselmann
    ))
3304 a8083063 Iustin Pop
3305 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
3306 a8083063 Iustin Pop
          self.secondaries)
3307 a8083063 Iustin Pop
    return env, nl, nl
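  # The environment built above carries INSTANCE_DISK_TEMPLATE,
  # INSTANCE_DISK_SIZE, INSTANCE_SWAP_SIZE and INSTANCE_ADD_MODE (plus
  # INSTANCE_SRC_NODE, INSTANCE_SRC_PATH and INSTANCE_SRC_IMAGE for imports),
  # merged with the generic per-instance variables from _BuildInstanceHookEnv;
  # the hooks run on the master, the primary node and any secondaries.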
3308 a8083063 Iustin Pop
3309 a8083063 Iustin Pop
3310 a8083063 Iustin Pop
  def CheckPrereq(self):
3311 a8083063 Iustin Pop
    """Check prerequisites.
3312 a8083063 Iustin Pop

3313 a8083063 Iustin Pop
    """
3314 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3315 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3316 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3317 eedc99de Manuel Franceschini
                                 " instances")
3318 eedc99de Manuel Franceschini
3319 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3320 7baf741d Guido Trotter
      src_node = self.op.src_node
3321 7baf741d Guido Trotter
      src_path = self.op.src_path
3322 a8083063 Iustin Pop
3323 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
3324 a8083063 Iustin Pop
3325 a8083063 Iustin Pop
      if not export_info:
3326 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3327 a8083063 Iustin Pop
3328 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3329 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3330 a8083063 Iustin Pop
3331 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3332 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3333 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3334 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3335 a8083063 Iustin Pop
3336 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
3337 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
3338 3ecf6786 Iustin Pop
                                   " one data disk")
3339 a8083063 Iustin Pop
3340 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
3341 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3342 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
3343 a8083063 Iustin Pop
                                                         'disk0_dump'))
3344 a8083063 Iustin Pop
      self.src_image = diskimage
3345 901a65c1 Iustin Pop
3346 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
3347 901a65c1 Iustin Pop
3348 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3349 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3350 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3351 901a65c1 Iustin Pop
3352 901a65c1 Iustin Pop
    if self.op.ip_check:
3353 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
3354 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3355 7baf741d Guido Trotter
                                   (self.check_ip, self.op.instance_name))
3356 901a65c1 Iustin Pop
3357 901a65c1 Iustin Pop
    # bridge verification
3358 901a65c1 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
3359 901a65c1 Iustin Pop
    if bridge is None:
3360 901a65c1 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
3361 901a65c1 Iustin Pop
    else:
3362 901a65c1 Iustin Pop
      self.op.bridge = bridge
3363 901a65c1 Iustin Pop
3364 538475ca Iustin Pop
    #### allocator run
3365 538475ca Iustin Pop
3366 538475ca Iustin Pop
    if self.op.iallocator is not None:
3367 538475ca Iustin Pop
      self._RunAllocator()
3368 0f1a06e3 Manuel Franceschini
3369 901a65c1 Iustin Pop
    #### node related checks
3370 901a65c1 Iustin Pop
3371 901a65c1 Iustin Pop
    # check primary node
3372 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
3373 7baf741d Guido Trotter
    assert self.pnode is not None, \
3374 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
3375 901a65c1 Iustin Pop
    self.secondaries = []
3376 901a65c1 Iustin Pop
3377 901a65c1 Iustin Pop
    # mirror node verification
3378 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3379 7baf741d Guido Trotter
      if self.op.snode is None:
3380 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3381 3ecf6786 Iustin Pop
                                   " a mirror node")
3382 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
3383 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3384 3ecf6786 Iustin Pop
                                   " the primary node.")
3385 7baf741d Guido Trotter
      self.secondaries.append(self.op.snode)
3386 a8083063 Iustin Pop
3387 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3388 e2fe6369 Iustin Pop
                                self.op.disk_size, self.op.swap_size)
3389 ed1ebc60 Guido Trotter
3390 8d75db10 Iustin Pop
    # Check lv size requirements
3391 8d75db10 Iustin Pop
    if req_size is not None:
3392 8d75db10 Iustin Pop
      nodenames = [pnode.name] + self.secondaries
3393 8d75db10 Iustin Pop
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3394 8d75db10 Iustin Pop
      for node in nodenames:
3395 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3396 8d75db10 Iustin Pop
        if not info:
3397 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3398 3e91897b Iustin Pop
                                     " from node '%s'" % node)
3399 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3400 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3401 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3402 8d75db10 Iustin Pop
                                     " node %s" % node)
3403 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
3404 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3405 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3406 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3407 ed1ebc60 Guido Trotter
3408 a8083063 Iustin Pop
    # os verification
3409 00fe9e38 Guido Trotter
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3410 dfa96ded Guido Trotter
    if not os_obj:
3411 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3412 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3413 a8083063 Iustin Pop
3414 3b6d8c9b Iustin Pop
    if self.op.kernel_path == constants.VALUE_NONE:
3415 3b6d8c9b Iustin Pop
      raise errors.OpPrereqError("Can't set instance kernel to none")
3416 3b6d8c9b Iustin Pop
3417 901a65c1 Iustin Pop
    # bridge check on primary node
3418 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3419 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3420 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3421 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
3422 a8083063 Iustin Pop
3423 49ce1563 Iustin Pop
    # memory check on primary node
3424 49ce1563 Iustin Pop
    if self.op.start:
3425 49ce1563 Iustin Pop
      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
3426 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3427 49ce1563 Iustin Pop
                           self.op.mem_size)
3428 49ce1563 Iustin Pop
3429 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
3430 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
3431 7baf741d Guido Trotter
      # FIXME (als): shouldn't these checks happen on the destination node?
3432 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
3433 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
3434 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
3435 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3436 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
3437 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
3438 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
3439 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
3440 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3441 31a853d2 Iustin Pop
3442 31a853d2 Iustin Pop
    # vnc_bind_address verification
3443 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
3444 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
3445 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
3446 31a853d2 Iustin Pop
                                   " like a valid IP address" %
3447 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
3448 31a853d2 Iustin Pop
3449 5397e0b7 Alexander Schreiber
    # Xen HVM device type checks
3450 5397e0b7 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
3451 5397e0b7 Alexander Schreiber
      if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
3452 5397e0b7 Alexander Schreiber
        raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
3453 5397e0b7 Alexander Schreiber
                                   " hypervisor" % self.op.hvm_nic_type)
3454 5397e0b7 Alexander Schreiber
      if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
3455 5397e0b7 Alexander Schreiber
        raise errors.OpPrereqError("Invalid disk type %s specified for Xen HVM"
3456 5397e0b7 Alexander Schreiber
                                   " hypervisor" % self.op.hvm_disk_type)
3457 5397e0b7 Alexander Schreiber
3458 a8083063 Iustin Pop
    if self.op.start:
3459 a8083063 Iustin Pop
      self.instance_status = 'up'
3460 a8083063 Iustin Pop
    else:
3461 a8083063 Iustin Pop
      self.instance_status = 'down'
3462 a8083063 Iustin Pop
3463 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3464 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
3465 a8083063 Iustin Pop

3466 a8083063 Iustin Pop
    """
3467 a8083063 Iustin Pop
    instance = self.op.instance_name
3468 a8083063 Iustin Pop
    pnode_name = self.pnode.name
3469 a8083063 Iustin Pop
3470 1862d460 Alexander Schreiber
    if self.op.mac == "auto":
3471 ba4b62cf Iustin Pop
      mac_address = self.cfg.GenerateMAC()
3472 1862d460 Alexander Schreiber
    else:
3473 ba4b62cf Iustin Pop
      mac_address = self.op.mac
3474 1862d460 Alexander Schreiber
3475 1862d460 Alexander Schreiber
    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
3476 a8083063 Iustin Pop
    if self.inst_ip is not None:
3477 a8083063 Iustin Pop
      nic.ip = self.inst_ip
3478 a8083063 Iustin Pop
3479 2a6469d5 Alexander Schreiber
    ht_kind = self.sstore.GetHypervisorType()
3480 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
3481 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
3482 2a6469d5 Alexander Schreiber
    else:
3483 2a6469d5 Alexander Schreiber
      network_port = None
3484 58acb49d Alexander Schreiber
3485 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is None:
3486 31a853d2 Iustin Pop
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3487 31a853d2 Iustin Pop
3488 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
3489 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
3490 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
3491 2c313123 Manuel Franceschini
    else:
3492 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
3493 2c313123 Manuel Franceschini
3494 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
3495 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
3496 0f1a06e3 Manuel Franceschini
                                        self.sstore.GetFileStorageDir(),
3497 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
3498 0f1a06e3 Manuel Franceschini
3499 0f1a06e3 Manuel Franceschini
3500 923b1523 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg,
3501 a8083063 Iustin Pop
                                  self.op.disk_template,
3502 a8083063 Iustin Pop
                                  instance, pnode_name,
3503 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
3504 0f1a06e3 Manuel Franceschini
                                  self.op.swap_size,
3505 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
3506 0f1a06e3 Manuel Franceschini
                                  self.op.file_driver)
3507 a8083063 Iustin Pop
3508 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3509 a8083063 Iustin Pop
                            primary_node=pnode_name,
3510 a8083063 Iustin Pop
                            memory=self.op.mem_size,
3511 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
3512 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
3513 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
3514 a8083063 Iustin Pop
                            status=self.instance_status,
3515 58acb49d Alexander Schreiber
                            network_port=network_port,
3516 3b6d8c9b Iustin Pop
                            kernel_path=self.op.kernel_path,
3517 3b6d8c9b Iustin Pop
                            initrd_path=self.op.initrd_path,
3518 25c5878d Alexander Schreiber
                            hvm_boot_order=self.op.hvm_boot_order,
3519 31a853d2 Iustin Pop
                            hvm_acpi=self.op.hvm_acpi,
3520 31a853d2 Iustin Pop
                            hvm_pae=self.op.hvm_pae,
3521 31a853d2 Iustin Pop
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
3522 31a853d2 Iustin Pop
                            vnc_bind_address=self.op.vnc_bind_address,
3523 5397e0b7 Alexander Schreiber
                            hvm_nic_type=self.op.hvm_nic_type,
3524 5397e0b7 Alexander Schreiber
                            hvm_disk_type=self.op.hvm_disk_type,
3525 a8083063 Iustin Pop
                            )
3526 a8083063 Iustin Pop
3527 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
3528 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
3529 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3530 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance)
3531 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
3532 a8083063 Iustin Pop
3533 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
3534 a8083063 Iustin Pop
3535 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3536 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
3537 7baf741d Guido Trotter
    # added the instance to the config
3538 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
3539 a1578d63 Iustin Pop
    # Remove the temporary assignments for the instance's drbds
3540 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance)
3541 a8083063 Iustin Pop
3542 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3543 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
3544 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3545 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3546 a8083063 Iustin Pop
      time.sleep(15)
3547 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3548 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
3549 a8083063 Iustin Pop
    else:
3550 a8083063 Iustin Pop
      disk_abort = False
3551 a8083063 Iustin Pop
3552 a8083063 Iustin Pop
    if disk_abort:
3553 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3554 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3555 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
3556 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
3557 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3558 3ecf6786 Iustin Pop
                               " this instance")
3559 a8083063 Iustin Pop
3560 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3561 a8083063 Iustin Pop
                (instance, pnode_name))
3562 a8083063 Iustin Pop
3563 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3564 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3565 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3566 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
3567 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3568 3ecf6786 Iustin Pop
                                   " on node %s" %
3569 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3570 a8083063 Iustin Pop
3571 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3572 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3573 a8083063 Iustin Pop
        src_node = self.op.src_node
3574 a8083063 Iustin Pop
        src_image = self.src_image
3575 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
3576 a8083063 Iustin Pop
                                                src_node, src_image):
3577 3ecf6786 Iustin Pop
          raise errors.OpExecError("Could not import os for instance"
3578 3ecf6786 Iustin Pop
                                   " %s on node %s" %
3579 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3580 a8083063 Iustin Pop
      else:
3581 a8083063 Iustin Pop
        # also checked in the prereq part
3582 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3583 3ecf6786 Iustin Pop
                                     % self.op.mode)
3584 a8083063 Iustin Pop
3585 a8083063 Iustin Pop
    if self.op.start:
3586 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
3587 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3588 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
3589 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3590 a8083063 Iustin Pop
3591 a8083063 Iustin Pop
3592 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3593 a8083063 Iustin Pop
  """Connect to an instance's console.
3594 a8083063 Iustin Pop

3595 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3596 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3597 a8083063 Iustin Pop
  console.
3598 a8083063 Iustin Pop

3599 a8083063 Iustin Pop
  """
3600 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3601 8659b73e Guido Trotter
  REQ_BGL = False
3602 8659b73e Guido Trotter
3603 8659b73e Guido Trotter
  def ExpandNames(self):
3604 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
3605 a8083063 Iustin Pop
3606 a8083063 Iustin Pop
  def CheckPrereq(self):
3607 a8083063 Iustin Pop
    """Check prerequisites.
3608 a8083063 Iustin Pop

3609 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3610 a8083063 Iustin Pop

3611 a8083063 Iustin Pop
    """
3612 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3613 8659b73e Guido Trotter
    assert self.instance is not None, \
3614 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3615 a8083063 Iustin Pop
3616 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3617 a8083063 Iustin Pop
    """Connect to the console of an instance
3618 a8083063 Iustin Pop

3619 a8083063 Iustin Pop
    """
3620 a8083063 Iustin Pop
    instance = self.instance
3621 a8083063 Iustin Pop
    node = instance.primary_node
3622 a8083063 Iustin Pop
3623 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
3624 a8083063 Iustin Pop
    if node_insts is False:
3625 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3626 a8083063 Iustin Pop
3627 a8083063 Iustin Pop
    if instance.name not in node_insts:
3628 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3629 a8083063 Iustin Pop
3630 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3631 a8083063 Iustin Pop
3632 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
3633 30989e69 Alexander Schreiber
    console_cmd = hyper.GetShellCommandForConsole(instance)
3634 b047857b Michael Hanselmann
3635 82122173 Iustin Pop
    # build ssh cmdline
3636 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
3637 a8083063 Iustin Pop
3638 a8083063 Iustin Pop
3639 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3640 a8083063 Iustin Pop
  """Replace the disks of an instance.
3641 a8083063 Iustin Pop

3642 a8083063 Iustin Pop
  """
3643 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3644 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3645 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3646 efd990e4 Guido Trotter
  REQ_BGL = False
3647 efd990e4 Guido Trotter
3648 efd990e4 Guido Trotter
  def ExpandNames(self):
3649 efd990e4 Guido Trotter
    self._ExpandAndLockInstance()
3650 efd990e4 Guido Trotter
3651 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
3652 efd990e4 Guido Trotter
      self.op.remote_node = None
3653 efd990e4 Guido Trotter
3654 efd990e4 Guido Trotter
    ia_name = getattr(self.op, "iallocator", None)
3655 efd990e4 Guido Trotter
    if ia_name is not None:
3656 efd990e4 Guido Trotter
      if self.op.remote_node is not None:
3657 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Give either the iallocator or the new"
3658 efd990e4 Guido Trotter
                                   " secondary, not both")
3659 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3660 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
3661 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
3662 efd990e4 Guido Trotter
      if remote_node is None:
3663 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
3664 efd990e4 Guido Trotter
                                   self.op.remote_node)
3665 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
3666 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
3667 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3668 efd990e4 Guido Trotter
    else:
3669 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
3670 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3671 efd990e4 Guido Trotter
3672 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
3673 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
3674 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
3675 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
3676 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
3677 efd990e4 Guido Trotter
      self._LockInstancesNodes()
3678 a8083063 Iustin Pop
3679 b6e82a65 Iustin Pop
  def _RunAllocator(self):
3680 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
3681 b6e82a65 Iustin Pop

3682 b6e82a65 Iustin Pop
    """
3683 b6e82a65 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3684 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
3685 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
3686 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
3687 b6e82a65 Iustin Pop
3688 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
3689 b6e82a65 Iustin Pop
3690 b6e82a65 Iustin Pop
    if not ial.success:
3691 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3692 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3693 b6e82a65 Iustin Pop
                                                           ial.info))
3694 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3695 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3696 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
3697 b6e82a65 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
3698 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
3699 b6e82a65 Iustin Pop
    logger.ToStdout("Selected new secondary for the instance: %s" %
3700 b6e82a65 Iustin Pop
                    self.op.remote_node)
3701 b6e82a65 Iustin Pop
3702 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3703 a8083063 Iustin Pop
    """Build hooks env.
3704 a8083063 Iustin Pop

3705 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3706 a8083063 Iustin Pop

3707 a8083063 Iustin Pop
    """
3708 a8083063 Iustin Pop
    env = {
3709 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
3710 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3711 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3712 a8083063 Iustin Pop
      }
3713 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3714 0834c866 Iustin Pop
    nl = [
3715 0834c866 Iustin Pop
      self.sstore.GetMasterNode(),
3716 0834c866 Iustin Pop
      self.instance.primary_node,
3717 0834c866 Iustin Pop
      ]
3718 0834c866 Iustin Pop
    if self.op.remote_node is not None:
3719 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
3720 a8083063 Iustin Pop
    return env, nl, nl
3721 a8083063 Iustin Pop
3722 a8083063 Iustin Pop
  def CheckPrereq(self):
3723 a8083063 Iustin Pop
    """Check prerequisites.
3724 a8083063 Iustin Pop

3725 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3726 a8083063 Iustin Pop

3727 a8083063 Iustin Pop
    """
3728 efd990e4 Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3729 efd990e4 Guido Trotter
    assert instance is not None, \
3730 efd990e4 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3731 a8083063 Iustin Pop
    self.instance = instance
3732 a8083063 Iustin Pop
3733 a9e0c397 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3734 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3735 a9e0c397 Iustin Pop
                                 " network mirrored.")
3736 a8083063 Iustin Pop
3737 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3738 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3739 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3740 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3741 a8083063 Iustin Pop
3742 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
3743 a9e0c397 Iustin Pop
3744 b6e82a65 Iustin Pop
    ia_name = getattr(self.op, "iallocator", None)
3745 b6e82a65 Iustin Pop
    if ia_name is not None:
3746 de8c7666 Guido Trotter
      self._RunAllocator()
3747 b6e82a65 Iustin Pop
3748 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
3749 a9e0c397 Iustin Pop
    if remote_node is not None:
3750 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3751 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
3752 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
3753 a9e0c397 Iustin Pop
    else:
3754 a9e0c397 Iustin Pop
      self.remote_node_info = None
3755 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3756 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3757 3ecf6786 Iustin Pop
                                 " the instance.")
3758 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
3759 0834c866 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_SEC:
3760 0834c866 Iustin Pop
        # this is for DRBD8, where we can't execute the same mode of
3761 0834c866 Iustin Pop
        # replacement as for drbd7 (no different port allocated)
3762 0834c866 Iustin Pop
        raise errors.OpPrereqError("Same secondary given, cannot execute"
3763 0834c866 Iustin Pop
                                   " replacement")
3764 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3765 7df43a76 Iustin Pop
      if (self.op.mode == constants.REPLACE_DISK_ALL and
3766 7df43a76 Iustin Pop
          remote_node is not None):
3767 7df43a76 Iustin Pop
        # switch to replace secondary mode
3768 7df43a76 Iustin Pop
        self.op.mode = constants.REPLACE_DISK_SEC
3769 7df43a76 Iustin Pop
3770 a9e0c397 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_ALL:
3771 12c3449a Michael Hanselmann
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
3772 a9e0c397 Iustin Pop
                                   " secondary disk replacement, not"
3773 a9e0c397 Iustin Pop
                                   " both at once")
3774 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_PRI:
3775 a9e0c397 Iustin Pop
        if remote_node is not None:
3776 12c3449a Michael Hanselmann
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
3777 a9e0c397 Iustin Pop
                                     " the secondary while doing a primary"
3778 a9e0c397 Iustin Pop
                                     " node disk replacement")
3779 a9e0c397 Iustin Pop
        self.tgt_node = instance.primary_node
3780 cff90b79 Iustin Pop
        self.oth_node = instance.secondary_nodes[0]
3781 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_SEC:
3782 a9e0c397 Iustin Pop
        self.new_node = remote_node # this can be None, in which case
3783 a9e0c397 Iustin Pop
                                    # we don't change the secondary
3784 a9e0c397 Iustin Pop
        self.tgt_node = instance.secondary_nodes[0]
3785 cff90b79 Iustin Pop
        self.oth_node = instance.primary_node
3786 a9e0c397 Iustin Pop
      else:
3787 a9e0c397 Iustin Pop
        raise errors.ProgrammerError("Unhandled disk replace mode")
3788 a9e0c397 Iustin Pop
3789 a9e0c397 Iustin Pop
    for name in self.op.disks:
3790 a9e0c397 Iustin Pop
      if instance.FindDisk(name) is None:
3791 a9e0c397 Iustin Pop
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3792 a9e0c397 Iustin Pop
                                   (name, instance.name))
3793 a8083063 Iustin Pop
3794 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
3795 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
3796 a9e0c397 Iustin Pop

3797 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3798 a9e0c397 Iustin Pop
      - for each disk to be replaced:
3799 a9e0c397 Iustin Pop
        - create new LVs on the target node with unique names
3800 a9e0c397 Iustin Pop
        - detach old LVs from the drbd device
3801 a9e0c397 Iustin Pop
        - rename old LVs to name_replaced.<time_t>
3802 a9e0c397 Iustin Pop
        - rename new LVs to old LVs
3803 a9e0c397 Iustin Pop
        - attach the new LVs (with the old names now) to the drbd device
3804 a9e0c397 Iustin Pop
      - wait for sync across all devices
3805 a9e0c397 Iustin Pop
      - for each modified disk:
3806 a9e0c397 Iustin Pop
        - remove old LVs (which have the name name_replaced.<time_t>)
3807 a9e0c397 Iustin Pop

3808 a9e0c397 Iustin Pop
    Failures are not very well handled.
3809 cff90b79 Iustin Pop

3810 a9e0c397 Iustin Pop
    """
3811 cff90b79 Iustin Pop
    steps_total = 6
3812 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3813 a9e0c397 Iustin Pop
    instance = self.instance
3814 a9e0c397 Iustin Pop
    iv_names = {}
3815 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3816 a9e0c397 Iustin Pop
    # start of work
3817 a9e0c397 Iustin Pop
    cfg = self.cfg
3818 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
3819 cff90b79 Iustin Pop
    oth_node = self.oth_node
3820 cff90b79 Iustin Pop
3821 cff90b79 Iustin Pop
    # Step: check device activation
3822 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3823 cff90b79 Iustin Pop
    info("checking volume groups")
3824 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
3825 cff90b79 Iustin Pop
    results = rpc.call_vg_list([oth_node, tgt_node])
3826 cff90b79 Iustin Pop
    if not results:
3827 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3828 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
3829 cff90b79 Iustin Pop
      res = results.get(node, False)
3830 cff90b79 Iustin Pop
      if not res or my_vg not in res:
3831 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3832 cff90b79 Iustin Pop
                                 (my_vg, node))
3833 cff90b79 Iustin Pop
    for dev in instance.disks:
3834 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3835 cff90b79 Iustin Pop
        continue
3836 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
3837 cff90b79 Iustin Pop
        info("checking %s on %s" % (dev.iv_name, node))
3838 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
3839 cff90b79 Iustin Pop
        if not rpc.call_blockdev_find(node, dev):
3840 cff90b79 Iustin Pop
          raise errors.OpExecError("Can't find device %s on node %s" %
3841 cff90b79 Iustin Pop
                                   (dev.iv_name, node))
3842 cff90b79 Iustin Pop
3843 cff90b79 Iustin Pop
    # Step: check other node consistency
3844 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3845 cff90b79 Iustin Pop
    for dev in instance.disks:
3846 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3847 cff90b79 Iustin Pop
        continue
3848 cff90b79 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
3849 cff90b79 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
3850 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
3851 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
3852 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
3853 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
3854 cff90b79 Iustin Pop
3855 cff90b79 Iustin Pop
    # Step: create new storage
3856 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3857 a9e0c397 Iustin Pop
    for dev in instance.disks:
3858 a9e0c397 Iustin Pop
      if not dev.iv_name in self.op.disks:
3859 a9e0c397 Iustin Pop
        continue
3860 a9e0c397 Iustin Pop
      size = dev.size
3861 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
3862 a9e0c397 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3863 a9e0c397 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3864 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3865 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
3866 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3867 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
3868 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
3869 a9e0c397 Iustin Pop
      old_lvs = dev.children
3870 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
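      # iv_names maps the disk's iv_name (e.g. "sda") to the drbd device and
      # its old/new backing LVs; the rename, sync-check and cleanup steps
      # below all work from this mapping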
3871 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
3872 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
3873 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3874 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3875 a9e0c397 Iustin Pop
      # are talking about the secondary node
3876 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
3877 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
3878 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3879 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3880 a9e0c397 Iustin Pop
                                   " node '%s'" %
3881 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
3882 a9e0c397 Iustin Pop
3883 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
3884 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
3885 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
3886 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
3887 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
3888 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
3889 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
3890 cff90b79 Iustin Pop
      #dev.children = []
3891 cff90b79 Iustin Pop
      #cfg.Update(instance)
3892 a9e0c397 Iustin Pop
3893 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
3894 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
3895 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
3896 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
3897 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
3898 cff90b79 Iustin Pop
3899 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
3900 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
3901 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
3902 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
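      # illustrative rename, with hypothetical names: ("xenvg", "x.sda_data")
      # becomes ("xenvg", "x.sda_data_replaced-1210000000"), i.e. the VG is
      # kept and the LV name gets a timestamped suffix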
3903 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
3904 cff90b79 Iustin Pop
      rlist = []
3905 cff90b79 Iustin Pop
      for to_ren in old_lvs:
3906 cff90b79 Iustin Pop
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
3907 cff90b79 Iustin Pop
        if find_res is not None: # device exists
3908 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
3909 cff90b79 Iustin Pop
3910 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
3911 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3912 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
3913 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
3914 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
3915 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
3916 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3917 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
3918 cff90b79 Iustin Pop
3919 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
3920 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
3921 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
3922 a9e0c397 Iustin Pop
3923 cff90b79 Iustin Pop
      for disk in old_lvs:
3924 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
3925 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
3926 a9e0c397 Iustin Pop
3927 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
3928 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
3929 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
3930 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
3931 a9e0c397 Iustin Pop
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
3932 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
3933 cff90b79 Iustin Pop
                    " logical volumes")
3934 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
3935 a9e0c397 Iustin Pop
3936 a9e0c397 Iustin Pop
      dev.children = new_lvs
3937 a9e0c397 Iustin Pop
      cfg.Update(instance)
3938 a9e0c397 Iustin Pop
3939 cff90b79 Iustin Pop
    # Step: wait for sync
3940 a9e0c397 Iustin Pop
3941 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3942 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3943 a9e0c397 Iustin Pop
    # return value
3944 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3945 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3946 a9e0c397 Iustin Pop
3947 a9e0c397 Iustin Pop
    # so check manually all the devices
3948 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3949 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3950 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3951 a9e0c397 Iustin Pop
      if is_degr:
3952 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3953 a9e0c397 Iustin Pop
3954 cff90b79 Iustin Pop
    # Step: remove old storage
3955 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3956 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3957 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
3958 a9e0c397 Iustin Pop
      for lv in old_lvs:
3959 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
3960 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(tgt_node, lv):
3961 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
3962 a9e0c397 Iustin Pop
          continue
3963 a9e0c397 Iustin Pop
3964 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
3965 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
3966 a9e0c397 Iustin Pop

3967 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3968 a9e0c397 Iustin Pop
      - for all disks of the instance:
3969 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
3970 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
3971 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
3972 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
3973 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
3974 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
3975 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
3976 a9e0c397 Iustin Pop
          not network enabled
3977 a9e0c397 Iustin Pop
      - wait for sync across all devices
3978 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
3979 a9e0c397 Iustin Pop

3980 a9e0c397 Iustin Pop
    Failures are not very well handled.
3981 0834c866 Iustin Pop

3982 a9e0c397 Iustin Pop
    """
3983 0834c866 Iustin Pop
    steps_total = 6
3984 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3985 a9e0c397 Iustin Pop
    instance = self.instance
3986 a9e0c397 Iustin Pop
    iv_names = {}
3987 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3988 a9e0c397 Iustin Pop
    # start of work
3989 a9e0c397 Iustin Pop
    cfg = self.cfg
3990 a9e0c397 Iustin Pop
    old_node = self.tgt_node
3991 a9e0c397 Iustin Pop
    new_node = self.new_node
3992 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
3993 0834c866 Iustin Pop
3994 0834c866 Iustin Pop
    # Step: check device activation
3995 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3996 0834c866 Iustin Pop
    info("checking volume groups")
3997 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
3998 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
3999 0834c866 Iustin Pop
    if not results:
4000 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4001 0834c866 Iustin Pop
    for node in pri_node, new_node:
4002 0834c866 Iustin Pop
      res = results.get(node, False)
4003 0834c866 Iustin Pop
      if not res or my_vg not in res:
4004 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4005 0834c866 Iustin Pop
                                 (my_vg, node))
4006 0834c866 Iustin Pop
    for dev in instance.disks:
4007 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4008 0834c866 Iustin Pop
        continue
4009 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
4010 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4011 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
4012 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
4013 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
4014 0834c866 Iustin Pop
4015 0834c866 Iustin Pop
    # Step: check other node consistency
4016 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4017 0834c866 Iustin Pop
    for dev in instance.disks:
4018 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4019 0834c866 Iustin Pop
        continue
4020 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
4021 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
4022 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4023 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4024 0834c866 Iustin Pop
                                 pri_node)
4025 0834c866 Iustin Pop
4026 0834c866 Iustin Pop
    # Step: create new storage
4027 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4028 468b46f9 Iustin Pop
    for dev in instance.disks:
4029 a9e0c397 Iustin Pop
      size = dev.size
4030 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
4031 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4032 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4033 a9e0c397 Iustin Pop
      # are talking about the secondary node
4034 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4035 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
4036 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4037 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4038 a9e0c397 Iustin Pop
                                   " node '%s'" %
4039 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4040 a9e0c397 Iustin Pop
4041 0834c866 Iustin Pop
4042 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
4043 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
4044 a1578d63 Iustin Pop
    # error and the success paths
4045 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4046 a1578d63 Iustin Pop
                                   instance.name)
4047 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
4048 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4049 468b46f9 Iustin Pop
    for dev, new_minor in zip(instance.disks, minors):
4050 0834c866 Iustin Pop
      size = dev.size
4051 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
4052 a9e0c397 Iustin Pop
      # create new devices on new_node
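      # the drbd8 logical_id here is (node_a, node_b, port, minor_a, minor_b,
      # secret): keep the port and secret, swap in the new node name and give
      # it the freshly allocated minor on whichever side is not the primary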
4053 ffa1c0dc Iustin Pop
      if pri_node == dev.logical_id[0]:
4054 ffa1c0dc Iustin Pop
        new_logical_id = (pri_node, new_node,
4055 f9518d38 Iustin Pop
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4056 f9518d38 Iustin Pop
                          dev.logical_id[5])
4057 ffa1c0dc Iustin Pop
      else:
4058 ffa1c0dc Iustin Pop
        new_logical_id = (new_node, pri_node,
4059 f9518d38 Iustin Pop
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4060 f9518d38 Iustin Pop
                          dev.logical_id[5])
4061 468b46f9 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children, new_logical_id)
4062 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4063 a1578d63 Iustin Pop
                    new_logical_id)
4064 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4065 ffa1c0dc Iustin Pop
                              logical_id=new_logical_id,
4066 a9e0c397 Iustin Pop
                              children=dev.children)
4067 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
4068 3f78eef2 Iustin Pop
                                        new_drbd, False,
4069 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4070 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4071 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4072 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4073 a9e0c397 Iustin Pop
4074 0834c866 Iustin Pop
    for dev in instance.disks:
4075 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4076 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
4077 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4078 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
4079 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
4080 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4081 a9e0c397 Iustin Pop
4082 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4083 642445d9 Iustin Pop
    done = 0
4084 642445d9 Iustin Pop
    for dev in instance.disks:
4085 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4086 f9518d38 Iustin Pop
      # set the network part of the physical (unique in bdev terms) id
4087 f9518d38 Iustin Pop
      # to None, meaning detach from network
4088 f9518d38 Iustin Pop
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
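      # (assumption: the first four slots of the drbd8 physical_id carry the
      # endpoint addresses/ports; slot 4 onwards -- minor, secret -- is kept)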
4089 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4090 642445d9 Iustin Pop
      # standalone state
4091 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
4092 642445d9 Iustin Pop
        done += 1
4093 642445d9 Iustin Pop
      else:
4094 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
4095 642445d9 Iustin Pop
                dev.iv_name)
4096 642445d9 Iustin Pop
4097 642445d9 Iustin Pop
    if not done:
4098 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4099 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
4100 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4101 642445d9 Iustin Pop
4102 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4103 642445d9 Iustin Pop
    # the instance to point to the new secondary
4104 642445d9 Iustin Pop
    info("updating instance configuration")
4105 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
4106 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
4107 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4108 642445d9 Iustin Pop
    cfg.Update(instance)
4109 a1578d63 Iustin Pop
    # we can remove now the temp minors as now the new values are
4110 a1578d63 Iustin Pop
    # written to the config file (and therefore stable)
4111 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance.name)
4112 a9e0c397 Iustin Pop
4113 642445d9 Iustin Pop
    # and now perform the drbd attach
4114 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4115 642445d9 Iustin Pop
    failures = []
4116 642445d9 Iustin Pop
    for dev in instance.disks:
4117 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
4118 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4119 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4120 642445d9 Iustin Pop
      # is correct
4121 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4122 ffa1c0dc Iustin Pop
      logging.debug("Disk to attach: %s", dev)
4123 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
4124 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
4125 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4126 a9e0c397 Iustin Pop
4127 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4128 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4129 a9e0c397 Iustin Pop
    # return value
4130 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4131 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
4132 a9e0c397 Iustin Pop
4133 a9e0c397 Iustin Pop
    # so check manually all the devices
4134 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4135 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4136 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
4137 a9e0c397 Iustin Pop
      if is_degr:
4138 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4139 a9e0c397 Iustin Pop
4140 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4141 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4142 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
4143 a9e0c397 Iustin Pop
      for lv in old_lvs:
4144 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4145 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
4146 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4147 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4148 a9e0c397 Iustin Pop
4149 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
4150 a9e0c397 Iustin Pop
    """Execute disk replacement.
4151 a9e0c397 Iustin Pop

4152 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
4153 a9e0c397 Iustin Pop

4154 a9e0c397 Iustin Pop
    """
4155 a9e0c397 Iustin Pop
    instance = self.instance
4156 22985314 Guido Trotter
4157 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
4158 22985314 Guido Trotter
    if instance.status == "down":
4159 023e3296 Guido Trotter
      _StartInstanceDisks(self.cfg, instance, True)
4160 22985314 Guido Trotter
4161 abdf0113 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
4162 a9e0c397 Iustin Pop
      if self.op.remote_node is None:
4163 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
4164 a9e0c397 Iustin Pop
      else:
4165 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
4166 a9e0c397 Iustin Pop
    else:
4167 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
4168 22985314 Guido Trotter
4169 22985314 Guido Trotter
    ret = fn(feedback_fn)
4170 22985314 Guido Trotter
4171 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
4172 22985314 Guido Trotter
    if instance.status == "down":
4173 023e3296 Guido Trotter
      _SafeShutdownInstanceDisks(instance, self.cfg)
4174 22985314 Guido Trotter
4175 22985314 Guido Trotter
    return ret
4176 a9e0c397 Iustin Pop
4177 a8083063 Iustin Pop
4178 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
4179 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
4180 8729e0d7 Iustin Pop

4181 8729e0d7 Iustin Pop
  """
4182 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
4183 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4184 8729e0d7 Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount"]
4185 31e63dbf Guido Trotter
  REQ_BGL = False
4186 31e63dbf Guido Trotter
4187 31e63dbf Guido Trotter
  def ExpandNames(self):
4188 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
4189 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4190 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4191 31e63dbf Guido Trotter
4192 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
4193 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
4194 31e63dbf Guido Trotter
      self._LockInstancesNodes()
4195 8729e0d7 Iustin Pop
4196 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
4197 8729e0d7 Iustin Pop
    """Build hooks env.
4198 8729e0d7 Iustin Pop

4199 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
4200 8729e0d7 Iustin Pop

4201 8729e0d7 Iustin Pop
    """
4202 8729e0d7 Iustin Pop
    env = {
4203 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
4204 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
4205 8729e0d7 Iustin Pop
      }
4206 8729e0d7 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self.instance))
4207 8729e0d7 Iustin Pop
    nl = [
4208 8729e0d7 Iustin Pop
      self.sstore.GetMasterNode(),
4209 8729e0d7 Iustin Pop
      self.instance.primary_node,
4210 8729e0d7 Iustin Pop
      ]
4211 8729e0d7 Iustin Pop
    return env, nl, nl
4212 8729e0d7 Iustin Pop
4213 8729e0d7 Iustin Pop
  def CheckPrereq(self):
4214 8729e0d7 Iustin Pop
    """Check prerequisites.
4215 8729e0d7 Iustin Pop

4216 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
4217 8729e0d7 Iustin Pop

4218 8729e0d7 Iustin Pop
    """
4219 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4220 31e63dbf Guido Trotter
    assert instance is not None, \
4221 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4222 31e63dbf Guido Trotter
4223 8729e0d7 Iustin Pop
    self.instance = instance
4224 8729e0d7 Iustin Pop
4225 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
4226 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
4227 8729e0d7 Iustin Pop
                                 " growing.")
4228 8729e0d7 Iustin Pop
4229 8729e0d7 Iustin Pop
    if instance.FindDisk(self.op.disk) is None:
4230 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
4231 c7cdfc90 Iustin Pop
                                 (self.op.disk, instance.name))
4232 8729e0d7 Iustin Pop
4233 8729e0d7 Iustin Pop
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
4234 8729e0d7 Iustin Pop
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
4235 8729e0d7 Iustin Pop
    for node in nodenames:
4236 8729e0d7 Iustin Pop
      info = nodeinfo.get(node, None)
4237 8729e0d7 Iustin Pop
      if not info:
4238 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
4239 8729e0d7 Iustin Pop
                                   " from node '%s'" % node)
4240 8729e0d7 Iustin Pop
      vg_free = info.get('vg_free', None)
4241 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
4242 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
4243 8729e0d7 Iustin Pop
                                   " node %s" % node)
4244 8729e0d7 Iustin Pop
      if self.op.amount > vg_free:
4245 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
4246 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
4247 8729e0d7 Iustin Pop
                                   (node, vg_free, self.op.amount))
4248 8729e0d7 Iustin Pop
4249 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
4250 8729e0d7 Iustin Pop
    """Execute disk grow.
4251 8729e0d7 Iustin Pop

4252 8729e0d7 Iustin Pop
    """
4253 8729e0d7 Iustin Pop
    instance = self.instance
4254 8729e0d7 Iustin Pop
    disk = instance.FindDisk(self.op.disk)
4255 8729e0d7 Iustin Pop
    for node in (instance.secondary_nodes + (instance.primary_node,)):
4256 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
4257 8729e0d7 Iustin Pop
      result = rpc.call_blockdev_grow(node, disk, self.op.amount)
4258 86de84dd Guido Trotter
      if not result or not isinstance(result, (list, tuple)) or len(result) != 2:
4259 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s" % node)
4260 8729e0d7 Iustin Pop
      elif not result[0]:
4261 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s: %s" %
4262 8729e0d7 Iustin Pop
                                 (node, result[1]))
4263 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
4264 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
4265 8729e0d7 Iustin Pop
    return
4266 8729e0d7 Iustin Pop
4267 8729e0d7 Iustin Pop
4268 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
4269 a8083063 Iustin Pop
  """Query runtime instance data.
4270 a8083063 Iustin Pop

4271 a8083063 Iustin Pop
  """
4272 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
4273 a987fa48 Guido Trotter
  REQ_BGL = False
4274 a987fa48 Guido Trotter
  def ExpandNames(self):
4275 a987fa48 Guido Trotter
    self.needed_locks = {}
4276 a987fa48 Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
4277 a987fa48 Guido Trotter
4278 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
4279 a987fa48 Guido Trotter
      raise errors.OpPrereqError("Invalid argument type 'instances'")
4280 a987fa48 Guido Trotter
4281 a987fa48 Guido Trotter
    if self.op.instances:
4282 a987fa48 Guido Trotter
      self.wanted_names = []
4283 a987fa48 Guido Trotter
      for name in self.op.instances:
4284 a987fa48 Guido Trotter
        full_name = self.cfg.ExpandInstanceName(name)
4285 a987fa48 Guido Trotter
        if full_name is None:
4286 a987fa48 Guido Trotter
          raise errors.OpPrereqError("Instance '%s' not known" %
4287 a987fa48 Guido Trotter
                                     name)
4288 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
4289 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
4290 a987fa48 Guido Trotter
    else:
4291 a987fa48 Guido Trotter
      self.wanted_names = None
4292 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
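      # no instance names given: lock all instances, and (via
      # _LockInstancesNodes in DeclareLocks) all the nodes they live on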
4293 a987fa48 Guido Trotter
4294 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4295 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4296 a987fa48 Guido Trotter
4297 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
4298 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
4299 a987fa48 Guido Trotter
      self._LockInstancesNodes()
4300 a8083063 Iustin Pop
4301 a8083063 Iustin Pop
  def CheckPrereq(self):
4302 a8083063 Iustin Pop
    """Check prerequisites.
4303 a8083063 Iustin Pop

4304 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
4305 a8083063 Iustin Pop

4306 a8083063 Iustin Pop
    """
4307 a987fa48 Guido Trotter
    if self.wanted_names is None:
4308 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4309 a8083063 Iustin Pop
4310 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
4311 a987fa48 Guido Trotter
                             in self.wanted_names]
4312 a987fa48 Guido Trotter
    return
4313 a8083063 Iustin Pop
4314 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
4315 a8083063 Iustin Pop
    """Compute block device status.
4316 a8083063 Iustin Pop

4317 a8083063 Iustin Pop
    """
4318 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
4319 a8083063 Iustin Pop
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
4320 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
4321 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
4322 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
4323 a8083063 Iustin Pop
        snode = dev.logical_id[1]
4324 a8083063 Iustin Pop
      else:
4325 a8083063 Iustin Pop
        snode = dev.logical_id[0]
4326 a8083063 Iustin Pop
4327 a8083063 Iustin Pop
    if snode:
4328 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
4329 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
4330 a8083063 Iustin Pop
    else:
4331 a8083063 Iustin Pop
      dev_sstatus = None
4332 a8083063 Iustin Pop
4333 a8083063 Iustin Pop
    if dev.children:
4334 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
4335 a8083063 Iustin Pop
                      for child in dev.children]
4336 a8083063 Iustin Pop
    else:
4337 a8083063 Iustin Pop
      dev_children = []
4338 a8083063 Iustin Pop
4339 a8083063 Iustin Pop
    data = {
4340 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
4341 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
4342 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
4343 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
4344 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
4345 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
4346 a8083063 Iustin Pop
      "children": dev_children,
4347 a8083063 Iustin Pop
      }
4348 a8083063 Iustin Pop
4349 a8083063 Iustin Pop
    return data
4350 a8083063 Iustin Pop
4351 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4352 a8083063 Iustin Pop
    """Gather and return data"""
4353 a8083063 Iustin Pop
    result = {}
4354 a8083063 Iustin Pop
    for instance in self.wanted_instances:
4355 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
4356 a8083063 Iustin Pop
                                                instance.name)
4357 a8083063 Iustin Pop
      if remote_info and "state" in remote_info:
4358 a8083063 Iustin Pop
        remote_state = "up"
4359 a8083063 Iustin Pop
      else:
4360 a8083063 Iustin Pop
        remote_state = "down"
4361 a8083063 Iustin Pop
      if instance.status == "down":
4362 a8083063 Iustin Pop
        config_state = "down"
4363 a8083063 Iustin Pop
      else:
4364 a8083063 Iustin Pop
        config_state = "up"
4365 a8083063 Iustin Pop
4366 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
4367 a8083063 Iustin Pop
               for device in instance.disks]
4368 a8083063 Iustin Pop
4369 a8083063 Iustin Pop
      idict = {
4370 a8083063 Iustin Pop
        "name": instance.name,
4371 a8083063 Iustin Pop
        "config_state": config_state,
4372 a8083063 Iustin Pop
        "run_state": remote_state,
4373 a8083063 Iustin Pop
        "pnode": instance.primary_node,
4374 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
4375 a8083063 Iustin Pop
        "os": instance.os,
4376 a8083063 Iustin Pop
        "memory": instance.memory,
4377 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4378 a8083063 Iustin Pop
        "disks": disks,
4379 f55ff7ec Iustin Pop
        "vcpus": instance.vcpus,
4380 a8083063 Iustin Pop
        }
4381 a8083063 Iustin Pop
4382 a8340917 Iustin Pop
      htkind = self.sstore.GetHypervisorType()
4383 a8340917 Iustin Pop
      if htkind == constants.HT_XEN_PVM30:
4384 a8340917 Iustin Pop
        idict["kernel_path"] = instance.kernel_path
4385 a8340917 Iustin Pop
        idict["initrd_path"] = instance.initrd_path
4386 a8340917 Iustin Pop
4387 a8340917 Iustin Pop
      if htkind == constants.HT_XEN_HVM31:
4388 a8340917 Iustin Pop
        idict["hvm_boot_order"] = instance.hvm_boot_order
4389 a8340917 Iustin Pop
        idict["hvm_acpi"] = instance.hvm_acpi
4390 a8340917 Iustin Pop
        idict["hvm_pae"] = instance.hvm_pae
4391 a8340917 Iustin Pop
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
4392 5397e0b7 Alexander Schreiber
        idict["hvm_nic_type"] = instance.hvm_nic_type
4393 5397e0b7 Alexander Schreiber
        idict["hvm_disk_type"] = instance.hvm_disk_type
4394 a8340917 Iustin Pop
4395 a8340917 Iustin Pop
      if htkind in constants.HTS_REQ_PORT:
4396 d0c11cf7 Alexander Schreiber
        if instance.vnc_bind_address is None:
4397 d0c11cf7 Alexander Schreiber
          vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
4398 d0c11cf7 Alexander Schreiber
        else:
4399 d0c11cf7 Alexander Schreiber
          vnc_bind_address = instance.vnc_bind_address
4400 34b6ab97 Alexander Schreiber
        if instance.network_port is None:
4401 34b6ab97 Alexander Schreiber
          vnc_console_port = None
4402 d0c11cf7 Alexander Schreiber
        elif vnc_bind_address == constants.BIND_ADDRESS_GLOBAL:
4403 a4273aba Alexander Schreiber
          vnc_console_port = "%s:%s" % (instance.primary_node,
4404 34b6ab97 Alexander Schreiber
                                        instance.network_port)
4405 d0c11cf7 Alexander Schreiber
        elif vnc_bind_address == constants.LOCALHOST_IP_ADDRESS:
4406 d0c11cf7 Alexander Schreiber
          vnc_console_port = "%s:%s on node %s" % (vnc_bind_address,
4407 a4273aba Alexander Schreiber
                                                   instance.network_port,
4408 a4273aba Alexander Schreiber
                                                   instance.primary_node)
4409 34b6ab97 Alexander Schreiber
        else:
4410 34b6ab97 Alexander Schreiber
          vnc_console_port = "%s:%s" % (instance.vnc_bind_address,
4411 34b6ab97 Alexander Schreiber
                                        instance.network_port)
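        # illustrative results (hypothetical values): "192.0.2.10:11000" for a
        # specific bind address, "node1.example.com:11000" when bound to
        # 0.0.0.0, or "127.0.0.1:11000 on node node1.example.com" for localhost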
4412 34b6ab97 Alexander Schreiber
        idict["vnc_console_port"] = vnc_console_port
4413 d0c11cf7 Alexander Schreiber
        idict["vnc_bind_address"] = vnc_bind_address
4414 a8340917 Iustin Pop
        idict["network_port"] = instance.network_port
4415 a8340917 Iustin Pop
4416 a8083063 Iustin Pop
      result[instance.name] = idict
4417 a8083063 Iustin Pop
4418 a8083063 Iustin Pop
    return result
4419 a8083063 Iustin Pop
4420 a8083063 Iustin Pop
4421 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4422 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4423 a8083063 Iustin Pop

4424 a8083063 Iustin Pop
  """
4425 a8083063 Iustin Pop
  HPATH = "instance-modify"
4426 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4427 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4428 1a5c7281 Guido Trotter
  REQ_BGL = False
4429 1a5c7281 Guido Trotter
4430 1a5c7281 Guido Trotter
  def ExpandNames(self):
4431 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
4432 a8083063 Iustin Pop
4433 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4434 a8083063 Iustin Pop
    """Build hooks env.
4435 a8083063 Iustin Pop

4436 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4437 a8083063 Iustin Pop

4438 a8083063 Iustin Pop
    """
4439 396e1b78 Michael Hanselmann
    args = dict()
4440 a8083063 Iustin Pop
    if self.mem:
4441 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
4442 a8083063 Iustin Pop
    if self.vcpus:
4443 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
4444 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4445 396e1b78 Michael Hanselmann
      if self.do_ip:
4446 396e1b78 Michael Hanselmann
        ip = self.ip
4447 396e1b78 Michael Hanselmann
      else:
4448 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4449 396e1b78 Michael Hanselmann
      if self.bridge:
4450 396e1b78 Michael Hanselmann
        bridge = self.bridge
4451 396e1b78 Michael Hanselmann
      else:
4452 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4453 ef756965 Iustin Pop
      if self.mac:
4454 ef756965 Iustin Pop
        mac = self.mac
4455 ef756965 Iustin Pop
      else:
4456 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4457 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4458 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
4459 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
4460 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4461 a8083063 Iustin Pop
    return env, nl, nl
4462 a8083063 Iustin Pop
4463 a8083063 Iustin Pop
  def CheckPrereq(self):
4464 a8083063 Iustin Pop
    """Check prerequisites.
4465 a8083063 Iustin Pop

4466 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4467 a8083063 Iustin Pop

4468 a8083063 Iustin Pop
    """
4469 1a5c7281 Guido Trotter
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
4470 1a5c7281 Guido Trotter
    # a separate CheckArguments function, if we implement one, so the operation
4471 1a5c7281 Guido Trotter
    # can be aborted without waiting for any lock, should it have an error...
4472 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
4473 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
4474 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4475 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4476 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4477 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4478 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4479 25c5878d Alexander Schreiber
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
4480 31a853d2 Iustin Pop
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
4481 31a853d2 Iustin Pop
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
4482 5397e0b7 Alexander Schreiber
    self.hvm_nic_type = getattr(self.op, "hvm_nic_type", None)
4483 5397e0b7 Alexander Schreiber
    self.hvm_disk_type = getattr(self.op, "hvm_disk_type", None)
4484 31a853d2 Iustin Pop
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
4485 31a853d2 Iustin Pop
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
4486 4300c4b6 Guido Trotter
    self.force = getattr(self.op, "force", None)
4487 31a853d2 Iustin Pop
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
4488 31a853d2 Iustin Pop
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
4489 31a853d2 Iustin Pop
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
4490 5397e0b7 Alexander Schreiber
                 self.vnc_bind_address, self.hvm_nic_type, self.hvm_disk_type]
4491 31a853d2 Iustin Pop
    if all_parms.count(None) == len(all_parms):
4492 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4493 a8083063 Iustin Pop
    if self.mem is not None:
4494 a8083063 Iustin Pop
      try:
4495 a8083063 Iustin Pop
        self.mem = int(self.mem)
4496 a8083063 Iustin Pop
      except ValueError, err:
4497 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
4498 a8083063 Iustin Pop
    if self.vcpus is not None:
4499 a8083063 Iustin Pop
      try:
4500 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
4501 a8083063 Iustin Pop
      except ValueError, err:
4502 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
4503 a8083063 Iustin Pop
    if self.ip is not None:
4504 a8083063 Iustin Pop
      self.do_ip = True
4505 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4506 a8083063 Iustin Pop
        self.ip = None
4507 a8083063 Iustin Pop
      else:
4508 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4509 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4510 a8083063 Iustin Pop
    else:
4511 a8083063 Iustin Pop
      self.do_ip = False
4512 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4513 1862d460 Alexander Schreiber
    if self.mac is not None:
4514 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4515 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4516 1862d460 Alexander Schreiber
                                   self.mac)
4517 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4518 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4519 a8083063 Iustin Pop
4520 973d7867 Iustin Pop
    if self.kernel_path is not None:
4521 973d7867 Iustin Pop
      self.do_kernel_path = True
4522 973d7867 Iustin Pop
      if self.kernel_path == constants.VALUE_NONE:
4523 973d7867 Iustin Pop
        raise errors.OpPrereqError("Can't set instance to no kernel")
4524 973d7867 Iustin Pop
4525 973d7867 Iustin Pop
      if self.kernel_path != constants.VALUE_DEFAULT:
4526 973d7867 Iustin Pop
        if not os.path.isabs(self.kernel_path):
4527 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The kernel path must be an absolute"
4528 973d7867 Iustin Pop
                                    " filename")
4529 8cafeb26 Iustin Pop
    else:
4530 8cafeb26 Iustin Pop
      self.do_kernel_path = False
4531 973d7867 Iustin Pop
4532 973d7867 Iustin Pop
    if self.initrd_path is not None:
4533 973d7867 Iustin Pop
      self.do_initrd_path = True
4534 973d7867 Iustin Pop
      if self.initrd_path not in (constants.VALUE_NONE,
4535 973d7867 Iustin Pop
                                  constants.VALUE_DEFAULT):
4536 2bc22872 Iustin Pop
        if not os.path.isabs(self.initrd_path):
4537 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The initrd path must be an absolute"
4538 973d7867 Iustin Pop
                                    " filename")
4539 8cafeb26 Iustin Pop
    else:
4540 8cafeb26 Iustin Pop
      self.do_initrd_path = False
4541 973d7867 Iustin Pop
4542 25c5878d Alexander Schreiber
    # boot order verification
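    # (background note, not enforced below: in Xen HVM terms the letters are
    # usually a=floppy, c=hard disk, d=cdrom, n=network)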
4543 25c5878d Alexander Schreiber
    if self.hvm_boot_order is not None:
4544 25c5878d Alexander Schreiber
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
4545 25c5878d Alexander Schreiber
        if len(self.hvm_boot_order.strip("acdn")) != 0:
4546 25c5878d Alexander Schreiber
          raise errors.OpPrereqError("invalid boot order specified,"
4547 25c5878d Alexander Schreiber
                                     " must be one or more of [acdn]"
4548 25c5878d Alexander Schreiber
                                     " or 'default'")
4549 25c5878d Alexander Schreiber
4550 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
4551 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
4552 3fc175f0 Alexander Schreiber
      if not (os.path.isabs(self.op.hvm_cdrom_image_path) or
4553 3fc175f0 Alexander Schreiber
              self.op.hvm_cdrom_image_path.lower() == "none"):
4554 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
4555 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
4556 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4557 3fc175f0 Alexander Schreiber
      if not (os.path.isfile(self.op.hvm_cdrom_image_path) or
4558 3fc175f0 Alexander Schreiber
              self.op.hvm_cdrom_image_path.lower() == "none"):
4559 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
4560 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
4561 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
4562 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4563 31a853d2 Iustin Pop
4564 31a853d2 Iustin Pop
    # vnc_bind_address verification
4565 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
4566 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
4567 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
4568 31a853d2 Iustin Pop
                                   " like a valid IP address" %
4569 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
4570 31a853d2 Iustin Pop
4571 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4572 1a5c7281 Guido Trotter
    assert self.instance is not None, \
4573 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4574 cfefe007 Guido Trotter
    self.warn = []
4575 cfefe007 Guido Trotter
    if self.mem is not None and not self.force:
4576 cfefe007 Guido Trotter
      pnode = self.instance.primary_node
4577 cfefe007 Guido Trotter
      nodelist = [pnode]
4578 cfefe007 Guido Trotter
      nodelist.extend(instance.secondary_nodes)
4579 cfefe007 Guido Trotter
      instance_info = rpc.call_instance_info(pnode, instance.name)
4580 cfefe007 Guido Trotter
      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
4581 cfefe007 Guido Trotter
4582 cfefe007 Guido Trotter
      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
4583 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
4584 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
4585 cfefe007 Guido Trotter
      else:
4586 cfefe007 Guido Trotter
        if instance_info:
4587 cfefe007 Guido Trotter
          current_mem = instance_info['memory']
4588 cfefe007 Guido Trotter
        else:
4589 cfefe007 Guido Trotter
          # Assume instance not running
4590 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
4591 cfefe007 Guido Trotter
          # and we have no other way to check)
4592 cfefe007 Guido Trotter
          current_mem = 0
4593 cfefe007 Guido Trotter
        miss_mem = self.mem - current_mem - nodeinfo[pnode]['memory_free']
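        # the new memory size must fit into what is free on the primary plus
        # what the instance is already using there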
4594 cfefe007 Guido Trotter
        if miss_mem > 0:
4595 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
4596 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
4597 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
4598 cfefe007 Guido Trotter
4599 cfefe007 Guido Trotter
      for node in instance.secondary_nodes:
4600 cfefe007 Guido Trotter
        if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
4601 cfefe007 Guido Trotter
          self.warn.append("Can't get info from secondary node %s" % node)
4602 cfefe007 Guido Trotter
        elif self.mem > nodeinfo[node]['memory_free']:
4603 cfefe007 Guido Trotter
          self.warn.append("Not enough memory to failover instance to secondary"
4604 cfefe007 Guido Trotter
                           " node %s" % node)
4605 cfefe007 Guido Trotter
4606 5bc84f33 Alexander Schreiber
    # Xen HVM device type checks
4607 5bc84f33 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
4608 5bc84f33 Alexander Schreiber
      if self.op.hvm_nic_type is not None:
4609 5bc84f33 Alexander Schreiber
        if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
4610 5bc84f33 Alexander Schreiber
          raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
4611 5bc84f33 Alexander Schreiber
                                     " HVM  hypervisor" % self.op.hvm_nic_type)
4612 5bc84f33 Alexander Schreiber
      if self.op.hvm_disk_type is not None:
4613 5bc84f33 Alexander Schreiber
        if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
4614 5bc84f33 Alexander Schreiber
          raise errors.OpPrereqError("Invalid disk type %s specified for Xen"
4615 5bc84f33 Alexander Schreiber
                                     " HVM hypervisor" % self.op.hvm_disk_type)
4616 5bc84f33 Alexander Schreiber
4617 a8083063 Iustin Pop
    return
4618 a8083063 Iustin Pop
4619 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4620 a8083063 Iustin Pop
    """Modifies an instance.
4621 a8083063 Iustin Pop

4622 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4623 a8083063 Iustin Pop
    """
4624 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
4625 cfefe007 Guido Trotter
    # feedback_fn there.
4626 cfefe007 Guido Trotter
    for warn in self.warn:
4627 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
4628 cfefe007 Guido Trotter
4629 a8083063 Iustin Pop
    result = []
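    # "result" collects (parameter, new value) pairs describing the changes
    # applied, e.g. (hypothetical values): [("mem", 512), ("vcpus", 2)]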
4630 a8083063 Iustin Pop
    instance = self.instance
4631 a8083063 Iustin Pop
    if self.mem:
4632 a8083063 Iustin Pop
      instance.memory = self.mem
4633 a8083063 Iustin Pop
      result.append(("mem", self.mem))
4634 a8083063 Iustin Pop
    if self.vcpus:
4635 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
4636 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
4637 a8083063 Iustin Pop
    if self.do_ip:
4638 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4639 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4640 a8083063 Iustin Pop
    if self.bridge:
4641 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4642 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4643 1862d460 Alexander Schreiber
    if self.mac:
4644 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4645 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4646 973d7867 Iustin Pop
    if self.do_kernel_path:
4647 973d7867 Iustin Pop
      instance.kernel_path = self.kernel_path
4648 973d7867 Iustin Pop
      result.append(("kernel_path", self.kernel_path))
4649 973d7867 Iustin Pop
    if self.do_initrd_path:
4650 973d7867 Iustin Pop
      instance.initrd_path = self.initrd_path
4651 973d7867 Iustin Pop
      result.append(("initrd_path", self.initrd_path))
4652 25c5878d Alexander Schreiber
    if self.hvm_boot_order:
4653 25c5878d Alexander Schreiber
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
4654 25c5878d Alexander Schreiber
        instance.hvm_boot_order = None
4655 25c5878d Alexander Schreiber
      else:
4656 25c5878d Alexander Schreiber
        instance.hvm_boot_order = self.hvm_boot_order
4657 25c5878d Alexander Schreiber
      result.append(("hvm_boot_order", self.hvm_boot_order))
4658 3fc175f0 Alexander Schreiber
    if self.hvm_acpi is not None:
4659 ec1ba002 Iustin Pop
      instance.hvm_acpi = self.hvm_acpi
4660 31a853d2 Iustin Pop
      result.append(("hvm_acpi", self.hvm_acpi))
4661 3fc175f0 Alexander Schreiber
    if self.hvm_pae is not None:
4662 ec1ba002 Iustin Pop
      instance.hvm_pae = self.hvm_pae
4663 31a853d2 Iustin Pop
      result.append(("hvm_pae", self.hvm_pae))
4664 5397e0b7 Alexander Schreiber
    if self.hvm_nic_type is not None:
4665 5397e0b7 Alexander Schreiber
      instance.hvm_nic_type = self.hvm_nic_type
4666 5397e0b7 Alexander Schreiber
      result.append(("hvm_nic_type", self.hvm_nic_type))
4667 5397e0b7 Alexander Schreiber
    if self.hvm_disk_type is not None:
4668 5397e0b7 Alexander Schreiber
      instance.hvm_disk_type = self.hvm_disk_type
4669 5397e0b7 Alexander Schreiber
      result.append(("hvm_disk_type", self.hvm_disk_type))
4670 31a853d2 Iustin Pop
    if self.hvm_cdrom_image_path:
4671 3fc175f0 Alexander Schreiber
      if self.hvm_cdrom_image_path == constants.VALUE_NONE:
4672 3fc175f0 Alexander Schreiber
        instance.hvm_cdrom_image_path = None
4673 3fc175f0 Alexander Schreiber
      else:
4674 3fc175f0 Alexander Schreiber
        instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
4675 31a853d2 Iustin Pop
      result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
4676 31a853d2 Iustin Pop
    if self.vnc_bind_address:
4677 31a853d2 Iustin Pop
      instance.vnc_bind_address = self.vnc_bind_address
4678 31a853d2 Iustin Pop
      result.append(("vnc_bind_address", self.vnc_bind_address))
4679 a8083063 Iustin Pop
4680 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
4681 a8083063 Iustin Pop
4682 a8083063 Iustin Pop
    return result
4683 a8083063 Iustin Pop
4684 a8083063 Iustin Pop
4685 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
4686 a8083063 Iustin Pop
  """Query the exports list
4687 a8083063 Iustin Pop

4688 a8083063 Iustin Pop
  """
4689 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
4690 21a15682 Guido Trotter
  REQ_BGL = False
4691 21a15682 Guido Trotter
4692 21a15682 Guido Trotter
  def ExpandNames(self):
4693 21a15682 Guido Trotter
    self.needed_locks = {}
4694 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
4695 21a15682 Guido Trotter
    if not self.op.nodes:
4696 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4697 21a15682 Guido Trotter
    else:
4698 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
4699 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
4700 a8083063 Iustin Pop
4701 a8083063 Iustin Pop
  def CheckPrereq(self):
4702 21a15682 Guido Trotter
    """Check prerequisites.
4703 a8083063 Iustin Pop

4704 a8083063 Iustin Pop
    """
4705 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
4706 a8083063 Iustin Pop
4707 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4708 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
4709 a8083063 Iustin Pop

4710 a8083063 Iustin Pop
    Returns:
4711 a8083063 Iustin Pop
      a dictionary with the structure node->(export-list)
4712 a8083063 Iustin Pop
      where export-list is a list of the instances exported on
4713 a8083063 Iustin Pop
      that node.
4714 a8083063 Iustin Pop

4715 a8083063 Iustin Pop
    """
4716 a7ba5e53 Iustin Pop
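    # Illustrative result shape (hypothetical names), as described above:
    #   {"node1.example.com": ["instance1", "instance2"]}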
    return rpc.call_export_list(self.nodes)
4717 a8083063 Iustin Pop
4718 a8083063 Iustin Pop
4719 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
4720 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
4721 a8083063 Iustin Pop

4722 a8083063 Iustin Pop
  """
4723 a8083063 Iustin Pop
  HPATH = "instance-export"
4724 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4725 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
4726 6657590e Guido Trotter
  REQ_BGL = False
4727 6657590e Guido Trotter
4728 6657590e Guido Trotter
  def ExpandNames(self):
4729 6657590e Guido Trotter
    self._ExpandAndLockInstance()
4730 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
4731 6657590e Guido Trotter
    #
4732 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
4733 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
4734 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
4735 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
4736 6657590e Guido Trotter
    #    then one to remove it afterwards
4737 6657590e Guido Trotter
    #  - removing the removal operation altogether
4738 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4739 6657590e Guido Trotter
4740 6657590e Guido Trotter
  def DeclareLocks(self, level):
4741 6657590e Guido Trotter
    """Last minute lock declaration."""
4742 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
4743 a8083063 Iustin Pop
4744 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4745 a8083063 Iustin Pop
    """Build hooks env.
4746 a8083063 Iustin Pop

4747 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
4748 a8083063 Iustin Pop

4749 a8083063 Iustin Pop
    """
4750 a8083063 Iustin Pop
    env = {
4751 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
4752 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
4753 a8083063 Iustin Pop
      }
4754 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
4755 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
4756 a8083063 Iustin Pop
          self.op.target_node]
4757 a8083063 Iustin Pop
    return env, nl, nl
4758 a8083063 Iustin Pop
4759 a8083063 Iustin Pop
  def CheckPrereq(self):
4760 a8083063 Iustin Pop
    """Check prerequisites.
4761 a8083063 Iustin Pop

4762 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
4763 a8083063 Iustin Pop

4764 a8083063 Iustin Pop
    """
4765 6657590e Guido Trotter
    instance_name = self.op.instance_name
4766 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
4767 6657590e Guido Trotter
    assert self.instance is not None, \
4768 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
4769 a8083063 Iustin Pop
4770 6657590e Guido Trotter
    self.dst_node = self.cfg.GetNodeInfo(
4771 6657590e Guido Trotter
      self.cfg.ExpandNodeName(self.op.target_node))
4772 a8083063 Iustin Pop
4773 6657590e Guido Trotter
    assert self.dst_node is not None, \
4774 6657590e Guido Trotter
          "Cannot retrieve locked node %s" % self.op.target_node
4775 a8083063 Iustin Pop
4776 b6023d6c Manuel Franceschini
    # instance disk type verification
4777 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
4778 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
4779 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
4780 b6023d6c Manuel Franceschini
                                   " file-based disks")
4781 b6023d6c Manuel Franceschini
4782 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4783 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
4784 a8083063 Iustin Pop

4785 a8083063 Iustin Pop
    """
4786 a8083063 Iustin Pop
    instance = self.instance
4787 a8083063 Iustin Pop
    dst_node = self.dst_node
4788 a8083063 Iustin Pop
    src_node = instance.primary_node
4789 a8083063 Iustin Pop
    if self.op.shutdown:
4790 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
4791 fb300fb7 Guido Trotter
      if not rpc.call_instance_shutdown(src_node, instance):
4792 38206f3c Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
4793 38206f3c Iustin Pop
                                 (instance.name, src_node))
4794 a8083063 Iustin Pop
4795 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
4796 a8083063 Iustin Pop
4797 a8083063 Iustin Pop
    snap_disks = []
4798 a8083063 Iustin Pop
4799 a8083063 Iustin Pop
    try:
4800 a8083063 Iustin Pop
      for disk in instance.disks:
4801 a8083063 Iustin Pop
        if disk.iv_name == "sda":
4802 a8083063 Iustin Pop
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
4803 a8083063 Iustin Pop
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
4804 a8083063 Iustin Pop
4805 a8083063 Iustin Pop
          if not new_dev_name:
4806 a8083063 Iustin Pop
            logger.Error("could not snapshot block device %s on node %s" %
4807 a8083063 Iustin Pop
                         (disk.logical_id[1], src_node))
4808 a8083063 Iustin Pop
          else:
4809 fe96220b Iustin Pop
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
4810 a8083063 Iustin Pop
                                      logical_id=(vgname, new_dev_name),
4811 a8083063 Iustin Pop
                                      physical_id=(vgname, new_dev_name),
4812 a8083063 Iustin Pop
                                      iv_name=disk.iv_name)
4813 a8083063 Iustin Pop
            snap_disks.append(new_dev)
4814 a8083063 Iustin Pop
4815 a8083063 Iustin Pop
    finally:
4816 fb300fb7 Guido Trotter
      if self.op.shutdown and instance.status == "up":
4817 fb300fb7 Guido Trotter
        if not rpc.call_instance_start(src_node, instance, None):
4818 fb300fb7 Guido Trotter
          _ShutdownInstanceDisks(instance, self.cfg)
4819 fb300fb7 Guido Trotter
          raise errors.OpExecError("Could not start instance")
4820 a8083063 Iustin Pop
4821 a8083063 Iustin Pop
    # TODO: check for size
4822 a8083063 Iustin Pop
4823 a8083063 Iustin Pop
    for dev in snap_disks:
4824 16687b98 Manuel Franceschini
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
4825 16687b98 Manuel Franceschini
        logger.Error("could not export block device %s from node %s to node %s"
4826 16687b98 Manuel Franceschini
                     % (dev.logical_id[1], src_node, dst_node.name))
4827 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(src_node, dev):
4828 16687b98 Manuel Franceschini
        logger.Error("could not remove snapshot block device %s from node %s" %
4829 16687b98 Manuel Franceschini
                     (dev.logical_id[1], src_node))
4830 a8083063 Iustin Pop
4831 a8083063 Iustin Pop
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
4832 a8083063 Iustin Pop
      logger.Error("could not finalize export for instance %s on node %s" %
4833 a8083063 Iustin Pop
                   (instance.name, dst_node.name))
4834 a8083063 Iustin Pop
4835 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
4836 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
4837 a8083063 Iustin Pop
4838 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
4839 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
4840 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
4841 a8083063 Iustin Pop
    if nodelist:
4842 204f2086 Guido Trotter
      exportlist = rpc.call_export_list(nodelist)
4843 a8083063 Iustin Pop
      for node in exportlist:
4844 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
4845 a8083063 Iustin Pop
          if not rpc.call_export_remove(node, instance.name):
4846 a8083063 Iustin Pop
            logger.Error("could not remove older export for instance %s"
4847 a8083063 Iustin Pop
                         " on node %s" % (instance.name, node))
4848 5c947f38 Iustin Pop
4849 5c947f38 Iustin Pop
4850 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
4851 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
4852 9ac99fda Guido Trotter

4853 9ac99fda Guido Trotter
  """
4854 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
4855 3656b3af Guido Trotter
  REQ_BGL = False
4856 3656b3af Guido Trotter
4857 3656b3af Guido Trotter
  def ExpandNames(self):
4858 3656b3af Guido Trotter
    self.needed_locks = {}
4859 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
4860 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
4861 3656b3af Guido Trotter
    # we can also remove exports for a removed instance)
4862 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4863 9ac99fda Guido Trotter
4864 9ac99fda Guido Trotter
  def CheckPrereq(self):
4865 9ac99fda Guido Trotter
    """Check prerequisites.
4866 9ac99fda Guido Trotter
    """
4867 9ac99fda Guido Trotter
    pass
4868 9ac99fda Guido Trotter
4869 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
4870 9ac99fda Guido Trotter
    """Remove any export.
4871 9ac99fda Guido Trotter

4872 9ac99fda Guido Trotter
    """
4873 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4874 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
4875 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
4876 9ac99fda Guido Trotter
    fqdn_warn = False
4877 9ac99fda Guido Trotter
    if not instance_name:
4878 9ac99fda Guido Trotter
      fqdn_warn = True
4879 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
4880 9ac99fda Guido Trotter
4881 3656b3af Guido Trotter
    exportlist = rpc.call_export_list(self.acquired_locks[locking.LEVEL_NODE])
4882 9ac99fda Guido Trotter
    found = False
4883 9ac99fda Guido Trotter
    for node in exportlist:
4884 9ac99fda Guido Trotter
      if instance_name in exportlist[node]:
4885 9ac99fda Guido Trotter
        found = True
4886 9ac99fda Guido Trotter
        if not rpc.call_export_remove(node, instance_name):
4887 9ac99fda Guido Trotter
          logger.Error("could not remove export for instance %s"
4888 9ac99fda Guido Trotter
                       " on node %s" % (instance_name, node))
4889 9ac99fda Guido Trotter
4890 9ac99fda Guido Trotter
    if fqdn_warn and not found:
4891 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
4892 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
4893 9ac99fda Guido Trotter
                  " Domain Name.")
4894 9ac99fda Guido Trotter
4895 9ac99fda Guido Trotter
4896 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
4897 5c947f38 Iustin Pop
  """Generic tags LU.
4898 5c947f38 Iustin Pop

4899 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
4900 5c947f38 Iustin Pop

4901 5c947f38 Iustin Pop
  """
4902 5c947f38 Iustin Pop
4903 8646adce Guido Trotter
  def ExpandNames(self):
4904 8646adce Guido Trotter
    self.needed_locks = {}
4905 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
4906 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
4907 5c947f38 Iustin Pop
      if name is None:
4908 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
4909 3ecf6786 Iustin Pop
                                   (self.op.name,))
4910 5c947f38 Iustin Pop
      self.op.name = name
4911 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = name
4912 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
4913 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
4914 5c947f38 Iustin Pop
      if name is None:
4915 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
4916 3ecf6786 Iustin Pop
                                   (self.op.name,))
4917 5c947f38 Iustin Pop
      self.op.name = name
4918 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = name
4919 8646adce Guido Trotter
4920 8646adce Guido Trotter
  def CheckPrereq(self):
4921 8646adce Guido Trotter
    """Check prerequisites.
4922 8646adce Guido Trotter

4923 8646adce Guido Trotter
    """
4924 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
4925 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
4926 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
4927 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
4928 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
4929 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
4930 5c947f38 Iustin Pop
    else:
4931 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
4932 3ecf6786 Iustin Pop
                                 str(self.op.kind))
4933 5c947f38 Iustin Pop
4934 5c947f38 Iustin Pop
4935 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
4936 5c947f38 Iustin Pop
  """Returns the tags of a given object.
4937 5c947f38 Iustin Pop

4938 5c947f38 Iustin Pop
  """
4939 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
4940 8646adce Guido Trotter
  REQ_BGL = False
4941 5c947f38 Iustin Pop
4942 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4943 5c947f38 Iustin Pop
    """Returns the tag list.
4944 5c947f38 Iustin Pop

4945 5c947f38 Iustin Pop
    """
4946 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
4947 5c947f38 Iustin Pop
4948 5c947f38 Iustin Pop
4949 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4950 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4951 73415719 Iustin Pop

4952 73415719 Iustin Pop
  """
4953 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4954 8646adce Guido Trotter
  REQ_BGL = False
4955 8646adce Guido Trotter
4956 8646adce Guido Trotter
  def ExpandNames(self):
4957 8646adce Guido Trotter
    self.needed_locks = {}
4958 73415719 Iustin Pop
4959 73415719 Iustin Pop
  def CheckPrereq(self):
4960 73415719 Iustin Pop
    """Check prerequisites.
4961 73415719 Iustin Pop

4962 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4963 73415719 Iustin Pop

4964 73415719 Iustin Pop
    """
4965 73415719 Iustin Pop
    try:
4966 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4967 73415719 Iustin Pop
    except re.error, err:
4968 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4969 73415719 Iustin Pop
                                 (self.op.pattern, err))
4970 73415719 Iustin Pop
4971 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4972 73415719 Iustin Pop
    """Returns the tag list.
4973 73415719 Iustin Pop

4974 73415719 Iustin Pop
    """
4975 73415719 Iustin Pop
    cfg = self.cfg
4976 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4977 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
4978 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4979 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
4980 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4981 73415719 Iustin Pop
    results = []
4982 73415719 Iustin Pop
    for path, target in tgts:
4983 73415719 Iustin Pop
      for tag in target.GetTags():
4984 73415719 Iustin Pop
        if self.re.search(tag):
4985 73415719 Iustin Pop
          results.append((path, tag))
4986 73415719 Iustin Pop
    return results
4987 73415719 Iustin Pop
4988 73415719 Iustin Pop
4989 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4990 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4991 5c947f38 Iustin Pop

4992 5c947f38 Iustin Pop
  """
4993 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4994 8646adce Guido Trotter
  REQ_BGL = False
4995 5c947f38 Iustin Pop
4996 5c947f38 Iustin Pop
  def CheckPrereq(self):
4997 5c947f38 Iustin Pop
    """Check prerequisites.
4998 5c947f38 Iustin Pop

4999 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
5000 5c947f38 Iustin Pop

5001 5c947f38 Iustin Pop
    """
5002 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5003 f27302fa Iustin Pop
    for tag in self.op.tags:
5004 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5005 5c947f38 Iustin Pop
5006 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5007 5c947f38 Iustin Pop
    """Sets the tag.
5008 5c947f38 Iustin Pop

5009 5c947f38 Iustin Pop
    """
5010 5c947f38 Iustin Pop
    try:
5011 f27302fa Iustin Pop
      for tag in self.op.tags:
5012 f27302fa Iustin Pop
        self.target.AddTag(tag)
5013 5c947f38 Iustin Pop
    except errors.TagError, err:
5014 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
5015 5c947f38 Iustin Pop
    try:
5016 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5017 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5018 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5019 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5020 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5021 5c947f38 Iustin Pop
5022 5c947f38 Iustin Pop
5023 f27302fa Iustin Pop
class LUDelTags(TagsLU):
5024 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
5025 5c947f38 Iustin Pop

5026 5c947f38 Iustin Pop
  """
5027 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5028 8646adce Guido Trotter
  REQ_BGL = False
5029 5c947f38 Iustin Pop
5030 5c947f38 Iustin Pop
  def CheckPrereq(self):
5031 5c947f38 Iustin Pop
    """Check prerequisites.
5032 5c947f38 Iustin Pop

5033 5c947f38 Iustin Pop
    This checks that we have the given tag.
5034 5c947f38 Iustin Pop

5035 5c947f38 Iustin Pop
    """
5036 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5037 f27302fa Iustin Pop
    for tag in self.op.tags:
5038 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5039 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
5040 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
5041 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
5042 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
5043 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
5044 f27302fa Iustin Pop
      diff_names.sort()
5045 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
5046 f27302fa Iustin Pop
                                 (",".join(diff_names)))
5047 5c947f38 Iustin Pop
5048 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5049 5c947f38 Iustin Pop
    """Remove the tag from the object.
5050 5c947f38 Iustin Pop

5051 5c947f38 Iustin Pop
    """
5052 f27302fa Iustin Pop
    for tag in self.op.tags:
5053 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
5054 5c947f38 Iustin Pop
    try:
5055 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5056 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5057 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5058 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5059 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5060 06009e27 Iustin Pop
5061 0eed6e61 Guido Trotter
5062 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
5063 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
5064 06009e27 Iustin Pop

5065 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
5066 06009e27 Iustin Pop
  time.
5067 06009e27 Iustin Pop

5068 06009e27 Iustin Pop
  """
5069 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
5070 fbe9022f Guido Trotter
  REQ_BGL = False
5071 06009e27 Iustin Pop
5072 fbe9022f Guido Trotter
  def ExpandNames(self):
5073 fbe9022f Guido Trotter
    """Expand names and set required locks.
5074 06009e27 Iustin Pop

5075 fbe9022f Guido Trotter
    This expands the node list, if any.
5076 06009e27 Iustin Pop

5077 06009e27 Iustin Pop
    """
5078 fbe9022f Guido Trotter
    self.needed_locks = {}
5079 06009e27 Iustin Pop
    if self.op.on_nodes:
5080 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
5081 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
5082 fbe9022f Guido Trotter
      # more information.
5083 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
5084 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
5085 fbe9022f Guido Trotter
5086 fbe9022f Guido Trotter
  def CheckPrereq(self):
5087 fbe9022f Guido Trotter
    """Check prerequisites.
5088 fbe9022f Guido Trotter

5089 fbe9022f Guido Trotter
    """
5090 06009e27 Iustin Pop
5091 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
5092 06009e27 Iustin Pop
    """Do the actual sleep.
5093 06009e27 Iustin Pop

5094 06009e27 Iustin Pop
    """
5095 06009e27 Iustin Pop
    if self.op.on_master:
5096 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
5097 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
5098 06009e27 Iustin Pop
    if self.op.on_nodes:
5099 06009e27 Iustin Pop
      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
5100 06009e27 Iustin Pop
      if not result:
5101 06009e27 Iustin Pop
        raise errors.OpExecError("Complete failure from rpc call")
5102 06009e27 Iustin Pop
      for node, node_result in result.items():
5103 06009e27 Iustin Pop
        if not node_result:
5104 06009e27 Iustin Pop
          raise errors.OpExecError("Failure during rpc call to node %s,"
5105 06009e27 Iustin Pop
                                   " result: %s" % (node, node_result))
5106 d61df03e Iustin Pop
5107 d61df03e Iustin Pop
5108 d1c2dd75 Iustin Pop
class IAllocator(object):
5109 d1c2dd75 Iustin Pop
  """IAllocator framework.
5110 d61df03e Iustin Pop

5111 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
5112 d1c2dd75 Iustin Pop
    - cfg/sstore that are needed to query the cluster
5113 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
5114 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
5115 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
5116 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
5117 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
5118 d1c2dd75 Iustin Pop
      easy usage
5119 d61df03e Iustin Pop

5120 d61df03e Iustin Pop
  """
5121 29859cb7 Iustin Pop
  _ALLO_KEYS = [
5122 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
5123 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
5124 d1c2dd75 Iustin Pop
    ]
5125 29859cb7 Iustin Pop
  _RELO_KEYS = [
5126 29859cb7 Iustin Pop
    "relocate_from",
5127 29859cb7 Iustin Pop
    ]
5128 d1c2dd75 Iustin Pop
5129 29859cb7 Iustin Pop
  def __init__(self, cfg, sstore, mode, name, **kwargs):
5130 d1c2dd75 Iustin Pop
    self.cfg = cfg
5131 d1c2dd75 Iustin Pop
    self.sstore = sstore
5132 d1c2dd75 Iustin Pop
    # init buffer variables
5133 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
5134 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
5135 29859cb7 Iustin Pop
    self.mode = mode
5136 29859cb7 Iustin Pop
    self.name = name
5137 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
5138 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
5139 29859cb7 Iustin Pop
    self.relocate_from = None
5140 27579978 Iustin Pop
    # computed fields
5141 27579978 Iustin Pop
    self.required_nodes = None
5142 d1c2dd75 Iustin Pop
    # init result fields
5143 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
5144 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5145 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
5146 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
5147 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
5148 29859cb7 Iustin Pop
    else:
5149 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
5150 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
5151 d1c2dd75 Iustin Pop
    for key in kwargs:
5152 29859cb7 Iustin Pop
      if key not in keyset:
5153 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
5154 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5155 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
5156 29859cb7 Iustin Pop
    for key in keyset:
5157 d1c2dd75 Iustin Pop
      if key not in kwargs:
5158 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
5159 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5160 d1c2dd75 Iustin Pop
    self._BuildInputData()
5161 d1c2dd75 Iustin Pop
5162 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
5163 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
5164 d1c2dd75 Iustin Pop

5165 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
5166 d1c2dd75 Iustin Pop

5167 d1c2dd75 Iustin Pop
    """
5168 d1c2dd75 Iustin Pop
    cfg = self.cfg
5169 d1c2dd75 Iustin Pop
    # cluster data
5170 d1c2dd75 Iustin Pop
    data = {
5171 d1c2dd75 Iustin Pop
      "version": 1,
5172 d1c2dd75 Iustin Pop
      "cluster_name": self.sstore.GetClusterName(),
5173 d1c2dd75 Iustin Pop
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
5174 6286519f Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
5175 d1c2dd75 Iustin Pop
      # we don't have job IDs
5176 d61df03e Iustin Pop
      }
5177 d61df03e Iustin Pop
5178 6286519f Iustin Pop
    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]
5179 6286519f Iustin Pop
5180 d1c2dd75 Iustin Pop
    # node data
5181 d1c2dd75 Iustin Pop
    node_results = {}
5182 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
5183 d1c2dd75 Iustin Pop
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
5184 d1c2dd75 Iustin Pop
    for nname in node_list:
5185 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
5186 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
5187 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
5188 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
5189 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
5190 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
5191 d1c2dd75 Iustin Pop
        if attr not in remote_info:
5192 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
5193 d1c2dd75 Iustin Pop
                                   (nname, attr))
5194 d1c2dd75 Iustin Pop
        try:
5195 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
5196 d1c2dd75 Iustin Pop
        except ValueError, err:
5197 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
5198 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
5199 6286519f Iustin Pop
      # compute memory used by primary instances
5200 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
5201 6286519f Iustin Pop
      for iinfo in i_list:
5202 6286519f Iustin Pop
        if iinfo.primary_node == nname:
5203 6286519f Iustin Pop
          i_p_mem += iinfo.memory
5204 6286519f Iustin Pop
          if iinfo.status == "up":
5205 6286519f Iustin Pop
            i_p_up_mem += iinfo.memory
5206 6286519f Iustin Pop
5207 b2662e7f Iustin Pop
      # compute memory used by instances
5208 d1c2dd75 Iustin Pop
      pnr = {
5209 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
5210 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
5211 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
5212 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
5213 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
5214 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
5215 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
5216 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
5217 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
5218 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
5219 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
5220 d1c2dd75 Iustin Pop
        }
5221 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
5222 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
5223 d1c2dd75 Iustin Pop
5224 d1c2dd75 Iustin Pop
    # instance data
5225 d1c2dd75 Iustin Pop
    instance_data = {}
5226 6286519f Iustin Pop
    for iinfo in i_list:
5227 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
5228 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
5229 d1c2dd75 Iustin Pop
      pir = {
5230 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
5231 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
5232 d1c2dd75 Iustin Pop
        "vcpus": iinfo.vcpus,
5233 d1c2dd75 Iustin Pop
        "memory": iinfo.memory,
5234 d1c2dd75 Iustin Pop
        "os": iinfo.os,
5235 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
5236 d1c2dd75 Iustin Pop
        "nics": nic_data,
5237 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
5238 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
5239 d1c2dd75 Iustin Pop
        }
5240 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
5241 d61df03e Iustin Pop
5242 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
5243 d61df03e Iustin Pop
5244 d1c2dd75 Iustin Pop
    self.in_data = data
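    # self.in_data now holds the generic cluster description ("version",
    # "cluster_name", "cluster_tags", "hypervisor_type", "nodes" and
    # "instances"); the mode-specific "request" key is added later by
    # _AddNewInstance or _AddRelocateInstance.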
5245 d61df03e Iustin Pop
5246 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
5247 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
5248 d61df03e Iustin Pop

5249 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
5250 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5251 d61df03e Iustin Pop

5252 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5253 d1c2dd75 Iustin Pop
    done.
5254 d61df03e Iustin Pop

5255 d1c2dd75 Iustin Pop
    """
5256 d1c2dd75 Iustin Pop
    data = self.in_data
5257 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
5258 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
5259 d1c2dd75 Iustin Pop
5260 d1c2dd75 Iustin Pop
    disk_space = _ComputeDiskSize(self.disk_template,
5261 d1c2dd75 Iustin Pop
                                  self.disks[0]["size"], self.disks[1]["size"])
5262 d1c2dd75 Iustin Pop
5263 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
5264 27579978 Iustin Pop
      self.required_nodes = 2
5265 27579978 Iustin Pop
    else:
5266 27579978 Iustin Pop
      self.required_nodes = 1
5267 d1c2dd75 Iustin Pop
    request = {
5268 d1c2dd75 Iustin Pop
      "type": "allocate",
5269 d1c2dd75 Iustin Pop
      "name": self.name,
5270 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
5271 d1c2dd75 Iustin Pop
      "tags": self.tags,
5272 d1c2dd75 Iustin Pop
      "os": self.os,
5273 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
5274 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
5275 d1c2dd75 Iustin Pop
      "disks": self.disks,
5276 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
5277 d1c2dd75 Iustin Pop
      "nics": self.nics,
5278 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5279 d1c2dd75 Iustin Pop
      }
5280 d1c2dd75 Iustin Pop
    data["request"] = request
5281 298fe380 Iustin Pop
5282 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
5283 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
5284 298fe380 Iustin Pop

5285 d1c2dd75 Iustin Pop
    This in combination with _IAllocatorGetClusterData will create the
5286 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5287 d61df03e Iustin Pop

5288 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5289 d1c2dd75 Iustin Pop
    done.
5290 d61df03e Iustin Pop

5291 d1c2dd75 Iustin Pop
    """
5292 27579978 Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.name)
5293 27579978 Iustin Pop
    if instance is None:
5294 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
5295 27579978 Iustin Pop
                                   " IAllocator" % self.name)
5296 27579978 Iustin Pop
5297 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5298 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
5299 27579978 Iustin Pop
5300 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
5301 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
5302 2a139bb0 Iustin Pop
5303 27579978 Iustin Pop
    self.required_nodes = 1
5304 27579978 Iustin Pop
5305 27579978 Iustin Pop
    disk_space = _ComputeDiskSize(instance.disk_template,
5306 27579978 Iustin Pop
                                  instance.disks[0].size,
5307 27579978 Iustin Pop
                                  instance.disks[1].size)
5308 27579978 Iustin Pop
5309 d1c2dd75 Iustin Pop
    request = {
5310 2a139bb0 Iustin Pop
      "type": "relocate",
5311 d1c2dd75 Iustin Pop
      "name": self.name,
5312 27579978 Iustin Pop
      "disk_space_total": disk_space,
5313 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5314 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
5315 d1c2dd75 Iustin Pop
      }
5316 27579978 Iustin Pop
    self.in_data["request"] = request
5317 d61df03e Iustin Pop
5318 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
5319 d1c2dd75 Iustin Pop
    """Build input data structures.
5320 d61df03e Iustin Pop

5321 d1c2dd75 Iustin Pop
    """
5322 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
5323 d61df03e Iustin Pop
5324 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5325 d1c2dd75 Iustin Pop
      self._AddNewInstance()
5326 d1c2dd75 Iustin Pop
    else:
5327 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
5328 d61df03e Iustin Pop
5329 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
5330 d61df03e Iustin Pop
5331 8d528b7c Iustin Pop
  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
5332 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
5333 298fe380 Iustin Pop

5334 d1c2dd75 Iustin Pop
    """
5335 d1c2dd75 Iustin Pop
    data = self.in_text
5336 298fe380 Iustin Pop
5337 8d528b7c Iustin Pop
    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)
5338 298fe380 Iustin Pop
5339 43f5ea7a Guido Trotter
    if not isinstance(result, (list, tuple)) or len(result) != 4:
5340 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
5341 8d528b7c Iustin Pop
5342 8d528b7c Iustin Pop
    rcode, stdout, stderr, fail = result
5343 8d528b7c Iustin Pop
5344 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
5345 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
5346 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
5347 38206f3c Iustin Pop
      raise errors.OpExecError("Instance allocator call failed: %s,"
5348 38206f3c Iustin Pop
                               " output: %s" % (fail, stdout+stderr))
5349 8d528b7c Iustin Pop
    self.out_text = stdout
5350 d1c2dd75 Iustin Pop
    if validate:
5351 d1c2dd75 Iustin Pop
      self._ValidateResult()
5352 298fe380 Iustin Pop
5353 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5354 d1c2dd75 Iustin Pop
    """Process the allocator results.
5355 538475ca Iustin Pop

5356 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5357 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5358 538475ca Iustin Pop

5359 d1c2dd75 Iustin Pop
    """
5360 d1c2dd75 Iustin Pop
    try:
5361 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5362 d1c2dd75 Iustin Pop
    except Exception, err:
5363 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5364 d1c2dd75 Iustin Pop
5365 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5366 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5367 538475ca Iustin Pop
5368 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5369 d1c2dd75 Iustin Pop
      if key not in rdict:
5370 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5371 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5372 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5373 538475ca Iustin Pop
5374 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5375 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5376 d1c2dd75 Iustin Pop
                               " is not a list")
5377 d1c2dd75 Iustin Pop
    self.out_data = rdict
5378 538475ca Iustin Pop
5379 538475ca Iustin Pop
5380 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
5381 d61df03e Iustin Pop
  """Run allocator tests.
5382 d61df03e Iustin Pop

5383 d61df03e Iustin Pop
  This LU runs the allocator tests
5384 d61df03e Iustin Pop

5385 d61df03e Iustin Pop
  """
5386 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
5387 d61df03e Iustin Pop
5388 d61df03e Iustin Pop
  def CheckPrereq(self):
5389 d61df03e Iustin Pop
    """Check prerequisites.
5390 d61df03e Iustin Pop

5391 d61df03e Iustin Pop
    This checks the opcode parameters depending on the direction and mode.
5392 d61df03e Iustin Pop

5393 d61df03e Iustin Pop
    """
5394 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5395 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
5396 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
5397 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
5398 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
5399 d61df03e Iustin Pop
                                     attr)
5400 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
5401 d61df03e Iustin Pop
      if iname is not None:
5402 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
5403 d61df03e Iustin Pop
                                   iname)
5404 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
5405 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'")
5406 d61df03e Iustin Pop
      for row in self.op.nics:
5407 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
5408 d61df03e Iustin Pop
            "mac" not in row or
5409 d61df03e Iustin Pop
            "ip" not in row or
5410 d61df03e Iustin Pop
            "bridge" not in row):
5411 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
5412 d61df03e Iustin Pop
                                     " 'nics' parameter")
5413 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
5414 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'")
5415 298fe380 Iustin Pop
      if len(self.op.disks) != 2:
5416 298fe380 Iustin Pop
        raise errors.OpPrereqError("Only two-disk configurations supported")
5417 d61df03e Iustin Pop
      for row in self.op.disks:
5418 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
5419 d61df03e Iustin Pop
            "size" not in row or
5420 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
5421 d61df03e Iustin Pop
            "mode" not in row or
5422 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
5423 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
5424 d61df03e Iustin Pop
                                     " 'disks' parameter")
5425 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
5426 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
5427 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
5428 d61df03e Iustin Pop
      fname = self.cfg.ExpandInstanceName(self.op.name)
5429 d61df03e Iustin Pop
      if fname is None:
5430 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
5431 d61df03e Iustin Pop
                                   self.op.name)
5432 d61df03e Iustin Pop
      self.op.name = fname
5433 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
5434 d61df03e Iustin Pop
    else:
5435 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
5436 d61df03e Iustin Pop
                                 self.op.mode)
5437 d61df03e Iustin Pop
5438 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
5439 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
5440 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing allocator name")
5441 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
5442 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
5443 d61df03e Iustin Pop
                                 self.op.direction)
5444 d61df03e Iustin Pop
5445 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
5446 d61df03e Iustin Pop
    """Run the allocator test.
5447 d61df03e Iustin Pop

5448 d61df03e Iustin Pop
    """
5449 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5450 29859cb7 Iustin Pop
      ial = IAllocator(self.cfg, self.sstore,
5451 29859cb7 Iustin Pop
                       mode=self.op.mode,
5452 29859cb7 Iustin Pop
                       name=self.op.name,
5453 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
5454 29859cb7 Iustin Pop
                       disks=self.op.disks,
5455 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
5456 29859cb7 Iustin Pop
                       os=self.op.os,
5457 29859cb7 Iustin Pop
                       tags=self.op.tags,
5458 29859cb7 Iustin Pop
                       nics=self.op.nics,
5459 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
5460 29859cb7 Iustin Pop
                       )
5461 29859cb7 Iustin Pop
    else:
5462 29859cb7 Iustin Pop
      ial = IAllocator(self.cfg, self.sstore,
5463 29859cb7 Iustin Pop
                       mode=self.op.mode,
5464 29859cb7 Iustin Pop
                       name=self.op.name,
5465 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
5466 29859cb7 Iustin Pop
                       )
5467 d61df03e Iustin Pop
5468 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
5469 d1c2dd75 Iustin Pop
      result = ial.in_text
5470 298fe380 Iustin Pop
    else:
5471 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
5472 d1c2dd75 Iustin Pop
      result = ial.out_text
5473 298fe380 Iustin Pop
    return result